diff --git a/.devcontainer/.poetry_cache/.keep b/.devcontainer/.poetry_cache/.keep index 8b1378917..e69de29bb 100644 --- a/.devcontainer/.poetry_cache/.keep +++ b/.devcontainer/.poetry_cache/.keep @@ -1 +0,0 @@ - diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 5e44d22c4..fd2259f0b 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -11,21 +11,25 @@ "command": "poetry self add poetry-polylith-plugin" }, "ghcr.io/devcontainers/features/docker-in-docker:2": {}, - "ghcr.io/mpriscella/features/kind:1": {}, "ghcr.io/devcontainers-contrib/features/gh-release:1": { "repo": "authzed/zed", - "binaryNames": "zed" + "binaryNames": "zed", + "version": "v0.21.1" }, "ghcr.io/devcontainers-contrib/features/spicedb:1": {}, "ghcr.io/devcontainers/features/kubectl-helm-minikube:1": { "minikube": "none" }, - "ghcr.io/eitsupi/devcontainer-features/jq-likes:2": {}, + "ghcr.io/eitsupi/devcontainer-features/jq-likes:2": { + "jqVersion": "latest", + "yqVersion": "latest" + }, "ghcr.io/dhoeric/features/k9s:1": {}, "ghcr.io/EliiseS/devcontainer-features/bash-profile:1": { "command": "alias k=kubectl" }, - "ghcr.io/devcontainers-contrib/features/rclone:1": {} + "ghcr.io/devcontainers-contrib/features/rclone:1": {}, + "./k3d": {} }, "overrideFeatureInstallOrder": [ "ghcr.io/devcontainers-contrib/features/poetry", diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml index 08dca9ca0..1dc6a27a1 100644 --- a/.devcontainer/docker-compose.yml +++ b/.devcontainer/docker-compose.yml @@ -23,6 +23,8 @@ services: ZED_TOKEN: renku ZED_INSECURE: "true" POETRY_CACHE_DIR: "/poetry_cache" + NB_SERVER_OPTIONS__DEFAULTS_PATH: /workspace/server_defaults.json + NB_SERVER_OPTIONS__UI_CHOICES_PATH: /workspace/server_options.json network_mode: service:db depends_on: - db @@ -43,6 +45,7 @@ services: - "8080:8080" - "5678:5678" - "50051:50051" + - "8888:80" swagger: image: swaggerapi/swagger-ui diff --git a/.devcontainer/k3d/devcontainer-feature.json b/.devcontainer/k3d/devcontainer-feature.json new file mode 100644 index 000000000..ca0423f94 --- /dev/null +++ b/.devcontainer/k3d/devcontainer-feature.json @@ -0,0 +1,17 @@ +{ + "id": "k3d", + "version": "1.0.0", + "name": "k3s based kubernetes cluster in docker", + "postCreateCommand": "k3d --version", + "installsAfter": [ + "ghcr.io/devcontainers-contrib/features/bash-command" + ], + "options": { + "k3d_version": { + "type": "string", + "description": "k3d version to install", + "proposals": ["latest", "5.7.4"], + "default": "latest" + } + } +} diff --git a/.devcontainer/k3d/install.sh b/.devcontainer/k3d/install.sh new file mode 100644 index 000000000..2a699ff35 --- /dev/null +++ b/.devcontainer/k3d/install.sh @@ -0,0 +1,14 @@ +if [ "${K3D_VERSION}" != "none" ]; then + echo "Downloading k3d..." 
+ if [ "${K3D_VERSION}" = "latest" ]; then + # Install and check the hash + curl -sSL https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | bash + else + find_version_from_git_tags K3D_VERSION https://github.com/kubernetes/K3D + if [ "${K3D_VERSION::1}" != "v" ]; then + K3D_VERSION="v${K3D_VERSION}" + fi + # Install and check the hash + curl -sSL https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG="${K3D_VERSION}" bash + fi +fi diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml index e3b14c4e4..b254c5b89 100644 --- a/.github/workflows/acceptance-tests.yml +++ b/.github/workflows/acceptance-tests.yml @@ -25,6 +25,8 @@ jobs: renku-graph: ${{ steps.deploy-comment.outputs.renku-graph}} renku-notebooks: ${{ steps.deploy-comment.outputs.renku-notebooks}} renku-ui: ${{ steps.deploy-comment.outputs.renku-ui}} + amalthea-sessions: ${{ steps.deploy-comment.outputs.amalthea-sessions}} + amalthea: ${{ steps.deploy-comment.outputs.amalthea}} test-enabled: ${{ steps.deploy-comment.outputs.test-enabled}} test-cypress-enabled: ${{ steps.deploy-comment.outputs.test-cypress-enabled}} persist: ${{ steps.deploy-comment.outputs.persist}} @@ -84,6 +86,8 @@ jobs: renku_graph: "${{ needs.check-deploy.outputs.renku-graph }}" renku_notebooks: "${{ needs.check-deploy.outputs.renku-notebooks }}" renku_data_services: "@${{ github.head_ref }}" + amalthea: "${{ needs.check-deploy.outputs.amalthea }}" + amalthea_sessions: "${{ needs.check-deploy.outputs.amalthea-sessions }}" extra_values: "${{ needs.check-deploy.outputs.extra-values }}" selenium-acceptance-tests: diff --git a/.github/workflows/save_cache.yml b/.github/workflows/save_cache.yml new file mode 100644 index 000000000..1230d6b58 --- /dev/null +++ b/.github/workflows/save_cache.yml @@ -0,0 +1,41 @@ +name: Create cache from commits on main + +on: + push: + branches: + - main + - chore-add-kind + workflow_dispatch: + + +jobs: + save-poetry-cache: + runs-on: ubuntu-latest + env: + CACHE_KEY: main-branch-poetry-cache-ubuntu + CACHE_PATH: .devcontainer/.poetry_cache + DEVCONTAINER_IMAGE_CACHE: ghcr.io/swissdatasciencecenter/renku-data-services/devcontainer + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Install python deps + uses: devcontainers/ci@v0.3 + with: + runCmd: poetry install --with dev + push: always + skipContainerUserIdUpdate: false + imageName: ${{ env.DEVCONTAINER_IMAGE_CACHE }} + cacheFrom: ${{ env.DEVCONTAINER_IMAGE_CACHE }} + - uses: actions/cache/save@v3 + name: Create cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} diff --git a/.github/workflows/test_publish.yml b/.github/workflows/test_publish.yml index e34758384..d2af9b2e4 100644 --- a/.github/workflows/test_publish.yml +++ b/.github/workflows/test_publish.yml @@ -70,6 +70,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | @@ -90,6 +95,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | @@ -111,6 +121,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: 
actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | @@ -155,6 +170,11 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + - uses: actions/cache/restore@v3 + name: Restore cache + with: + path: ${{ env.CACHE_PATH }} + key: ${{ env.CACHE_KEY }} - name: Set Git config shell: bash run: | diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e3b711e93..1cb8f5931 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,8 +18,10 @@ repos: - id: check-toml - id: debug-statements - id: end-of-file-fixer + exclude: 'components/renku_data_services/message_queue/(avro_models|schemas)' - id: mixed-line-ending - id: trailing-whitespace + exclude: 'components/renku_data_services/message_queue/(avro_models|schemas)' - repo: https://github.com/asottile/yesqa rev: v1.5.0 hooks: diff --git a/Makefile b/Makefile index 5b588b2e5..03727817a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ -.PHONY: schemas tests test_setup main_tests schemathesis_tests collect_coverage style_checks pre_commit_checks run download_avro check_avro avro_models update_avro kind_cluster install_amaltheas all +.PHONY: schemas tests test_setup main_tests schemathesis_tests collect_coverage style_checks pre_commit_checks run download_avro check_avro avro_models update_avro k3d_cluster install_amaltheas all -AMALTHEA_JS_VERSION ?= 0.11.0 -AMALTHEA_SESSIONS_VERSION ?= 0.0.1-new-operator-chart +AMALTHEA_JS_VERSION ?= 0.12.2 +AMALTHEA_SESSIONS_VERSION ?= 0.0.10-new-operator-chart codegen_params = --input-file-type openapi --output-model-type pydantic_v2.BaseModel --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --set-default-enum-member --openapi-scopes schemas paths parameters --set-default-enum-member --use-one-literal-as-default --use-default define test_apispec_up_to_date @@ -38,10 +38,23 @@ components/renku_data_services/notebooks/apispec.py: components/renku_data_servi poetry run datamodel-codegen --input components/renku_data_services/notebooks/api.spec.yaml --output components/renku_data_services/notebooks/apispec.py --base-class renku_data_services.notebooks.apispec_base.BaseAPISpec $(codegen_params) components/renku_data_services/platform/apispec.py: components/renku_data_services/platform/api.spec.yaml poetry run datamodel-codegen --input components/renku_data_services/platform/api.spec.yaml --output components/renku_data_services/platform/apispec.py --base-class renku_data_services.platform.apispec_base.BaseAPISpec $(codegen_params) +components/renku_data_services/data_connectors/apispec.py: components/renku_data_services/data_connectors/api.spec.yaml + poetry run datamodel-codegen --input components/renku_data_services/data_connectors/api.spec.yaml --output components/renku_data_services/data_connectors/apispec.py --base-class renku_data_services.data_connectors.apispec_base.BaseAPISpec $(codegen_params) ##@ Apispec -schemas: components/renku_data_services/crc/apispec.py components/renku_data_services/storage/apispec.py components/renku_data_services/users/apispec.py components/renku_data_services/project/apispec.py components/renku_data_services/namespace/apispec.py components/renku_data_services/secrets/apispec.py components/renku_data_services/connected_services/apispec.py components/renku_data_services/repositories/apispec.py components/renku_data_services/notebooks/apispec.py 
components/renku_data_services/platform/apispec.py ## Generate pydantic classes from apispec yaml files
+schemas: components/renku_data_services/crc/apispec.py \
+components/renku_data_services/storage/apispec.py \
+components/renku_data_services/users/apispec.py \
+components/renku_data_services/project/apispec.py \
+components/renku_data_services/session/apispec.py \
+components/renku_data_services/namespace/apispec.py \
+components/renku_data_services/secrets/apispec.py \
+components/renku_data_services/connected_services/apispec.py \
+components/renku_data_services/repositories/apispec.py \
+components/renku_data_services/notebooks/apispec.py \
+components/renku_data_services/platform/apispec.py \
+components/renku_data_services/data_connectors/apispec.py ## Generate pydantic classes from apispec yaml files
 	@echo "generated classes based on ApiSpec"
 
 ##@ Avro schemas
@@ -84,6 +97,8 @@ style_checks: ## Run linting and style checks
 	@$(call test_apispec_up_to_date,"notebooks")
 	@echo "checking platform apispec is up to date"
 	@$(call test_apispec_up_to_date,"platform")
+	@echo "checking session apispec is up to date"
+	@$(call test_apispec_up_to_date,"session")
 	poetry run mypy
 	poetry run ruff format --check
 	poetry run ruff check .
@@ -131,17 +146,17 @@ help: ## Display this help.
 
 ##@ Helm/k8s
 
-kind_cluster: ## Creates a kind cluster for testing
-	kind delete cluster
-	docker network rm -f kind
-	docker network create -d=bridge -o com.docker.network.bridge.enable_ip_masquerade=true -o com.docker.network.driver.mtu=1500 --ipv6=false kind
-	kind create cluster --config kind_config.yaml
-	kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
-	echo "Waiting for ingress controller to initialize"
-	sleep 15
-	kubectl wait --namespace ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s
-
-install_amaltheas: ## Installs both version of amalthea in the currently active k8s context.
+k3d_cluster: ## Creates a k3d cluster for testing
+	k3d cluster delete
+	k3d cluster create --agents 1 --k3s-arg --disable=metrics-server@server:0
+
+install_amaltheas: ## Installs both versions of amalthea. NOTE: It uses the currently active k8s context.
helm repo add renku https://swissdatasciencecenter.github.io/helm-charts - helm install amalthea-js renku/amalthea --version $(AMALTHEA_JS_VERSION) - helm install amalthea-sessions renku/amalthea-sessions --version $(AMALTHEA_SESSIONS_VERSION) + helm repo update + helm upgrade --install amalthea-js renku/amalthea --version $(AMALTHEA_JS_VERSION) + helm upgrade --install amalthea-se renku/amalthea-sessions --version ${AMALTHEA_SESSIONS_VERSION} + +# TODO: Add the version variables from the top of the file here when the charts are fully published +amalthea_schema: ## Updates generates pydantic classes from CRDs + curl https://raw.githubusercontent.com/SwissDataScienceCenter/amalthea/feat-add-cloud-storage/config/crd/bases/amalthea.dev_amaltheasessions.yaml | yq '.spec.versions[0].schema.openAPIV3Schema' | poetry run datamodel-codegen --input-file-type jsonschema --output-model-type pydantic_v2.BaseModel --output components/renku_data_services/notebooks/cr_amalthea_session.py --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --base-class renku_data_services.notebooks.cr_base.BaseCRD --allow-extra-fields --use-default-kwarg + curl https://raw.githubusercontent.com/SwissDataScienceCenter/amalthea/main/controller/crds/jupyter_server.yaml | yq '.spec.versions[0].schema.openAPIV3Schema' | poetry run datamodel-codegen --input-file-type jsonschema --output-model-type pydantic_v2.BaseModel --output components/renku_data_services/notebooks/cr_jupyter_server.py --use-double-quotes --target-python-version 3.12 --collapse-root-models --field-constraints --strict-nullable --base-class renku_data_services.notebooks.cr_base.BaseCRD --allow-extra-fields --use-default-kwarg diff --git a/bases/renku_data_services/data_api/app.py b/bases/renku_data_services/data_api/app.py index 2765aa542..b87a0ef9c 100644 --- a/bases/renku_data_services/data_api/app.py +++ b/bases/renku_data_services/data_api/app.py @@ -15,12 +15,14 @@ ResourcePoolUsersBP, UserResourcePoolsBP, ) +from renku_data_services.data_connectors.blueprints import DataConnectorsBP from renku_data_services.namespace.blueprints import GroupsBP +from renku_data_services.notebooks.blueprints import NotebooksBP, NotebooksNewBP from renku_data_services.platform.blueprints import PlatformConfigBP from renku_data_services.project.blueprints import ProjectsBP from renku_data_services.repositories.blueprints import RepositoriesBP from renku_data_services.session.blueprints import EnvironmentsBP, SessionLaunchersBP -from renku_data_services.storage.blueprints import StorageBP, StorageSchemaBP, StoragesV2BP +from renku_data_services.storage.blueprints import StorageBP, StorageSchemaBP from renku_data_services.users.blueprints import KCUsersBP, UserPreferencesBP, UserSecretsBP @@ -74,12 +76,6 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: storage_repo=config.storage_repo, authenticator=config.gitlab_authenticator, ) - storages_v2 = StoragesV2BP( - name="storages_v2", - url_prefix=url_prefix, - storage_v2_repo=config.storage_v2_repo, - authenticator=config.authenticator, - ) storage_schema = StorageSchemaBP(name="storage_schema", url_prefix=url_prefix) user_preferences = UserPreferencesBP( name="user_preferences", @@ -134,12 +130,40 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: authenticator=config.authenticator, internal_gitlab_authenticator=config.gitlab_authenticator, ) + notebooks = NotebooksBP( + name="notebooks_old", + url_prefix=url_prefix, + 
authenticator=config.authenticator, + nb_config=config.nb_config, + internal_gitlab_authenticator=config.gitlab_authenticator, + git_repo=config.git_repositories_repo, + rp_repo=config.rp_repo, + ) + notebooks_new = NotebooksNewBP( + name="notebooks", + url_prefix=url_prefix, + authenticator=config.authenticator, + nb_config=config.nb_config, + project_repo=config.project_repo, + session_repo=config.session_repo, + rp_repo=config.rp_repo, + data_connector_repo=config.data_connector_repo, + data_connector_project_link_repo=config.data_connector_to_project_link_repo, + internal_gitlab_authenticator=config.gitlab_authenticator, + ) platform_config = PlatformConfigBP( name="platform_config", url_prefix=url_prefix, platform_repo=config.platform_repo, authenticator=config.authenticator, ) + data_connectors = DataConnectorsBP( + name="data_connectors", + url_prefix=url_prefix, + data_connector_repo=config.data_connector_repo, + data_connector_to_project_link_repo=config.data_connector_to_project_link_repo, + authenticator=config.authenticator, + ) app.blueprint( [ resource_pools.blueprint(), @@ -150,7 +174,6 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: user_secrets.blueprint(), user_resource_pools.blueprint(), storage.blueprint(), - storages_v2.blueprint(), storage_schema.blueprint(), user_preferences.blueprint(), misc.blueprint(), @@ -161,7 +184,10 @@ def register_all_handlers(app: Sanic, config: Config) -> Sanic: oauth2_clients.blueprint(), oauth2_connections.blueprint(), repositories.blueprint(), + notebooks.blueprint(), + notebooks_new.blueprint(), platform_config.blueprint(), + data_connectors.blueprint(), ] ) diff --git a/components/renku_data_services/app_config/config.py b/components/renku_data_services/app_config/config.py index 7e988ab2c..99505d616 100644 --- a/components/renku_data_services/app_config/config.py +++ b/components/renku_data_services/app_config/config.py @@ -25,6 +25,7 @@ import renku_data_services.base_models as base_models import renku_data_services.connected_services import renku_data_services.crc +import renku_data_services.data_connectors import renku_data_services.platform import renku_data_services.repositories import renku_data_services.storage @@ -43,6 +44,7 @@ ServerOptionsDefaults, generate_default_resource_pool, ) +from renku_data_services.data_connectors.db import DataConnectorProjectLinkRepository, DataConnectorRepository from renku_data_services.db_config import DBConfig from renku_data_services.git.gitlab import DummyGitlabAPI, GitlabAPI from renku_data_services.k8s.clients import DummyCoreClient, DummySchedulingClient, K8sCoreClient, K8sSchedulingClient @@ -52,12 +54,13 @@ from renku_data_services.message_queue.interface import IMessageQueue from renku_data_services.message_queue.redis_queue import RedisQueue from renku_data_services.namespace.db import GroupRepository +from renku_data_services.notebooks.config import NotebooksConfig from renku_data_services.platform.db import PlatformRepository from renku_data_services.project.db import ProjectMemberRepository, ProjectRepository from renku_data_services.repositories.db import GitRepositoriesRepository from renku_data_services.secrets.db import UserSecretsRepo from renku_data_services.session.db import SessionRepository -from renku_data_services.storage.db import StorageRepository, StorageV2Repository +from renku_data_services.storage.db import StorageRepository from renku_data_services.users.config import UserPreferencesConfig from renku_data_services.users.db import 
UserPreferencesRepository from renku_data_services.users.db import UserRepo as KcUserRepo @@ -144,6 +147,7 @@ class Config: kc_api: IKeycloakAPI message_queue: IMessageQueue gitlab_url: str | None + nb_config: NotebooksConfig secrets_service_public_key: rsa.RSAPublicKey """The public key of the secrets service, used to encrypt user secrets that only it can decrypt.""" @@ -162,7 +166,6 @@ class Config: _user_repo: UserRepository | None = field(default=None, repr=False, init=False) _rp_repo: ResourcePoolRepository | None = field(default=None, repr=False, init=False) _storage_repo: StorageRepository | None = field(default=None, repr=False, init=False) - _storage_v2_repo: StorageV2Repository | None = field(default=None, repr=False, init=False) _project_repo: ProjectRepository | None = field(default=None, repr=False, init=False) _group_repo: GroupRepository | None = field(default=None, repr=False, init=False) _event_repo: EventRepository | None = field(default=None, repr=False, init=False) @@ -174,6 +177,10 @@ class Config: _connected_services_repo: ConnectedServicesRepository | None = field(default=None, repr=False, init=False) _git_repositories_repo: GitRepositoriesRepository | None = field(default=None, repr=False, init=False) _platform_repo: PlatformRepository | None = field(default=None, repr=False, init=False) + _data_connector_repo: DataConnectorRepository | None = field(default=None, repr=False, init=False) + _data_connector_to_project_link_repo: DataConnectorProjectLinkRepository | None = field( + default=None, repr=False, init=False + ) def __post_init__(self) -> None: spec_file = Path(renku_data_services.crc.__file__).resolve().parent / "api.spec.yaml" @@ -208,10 +215,18 @@ def __post_init__(self) -> None: with open(spec_file) as f: repositories = safe_load(f) + spec_file = Path(renku_data_services.notebooks.__file__).resolve().parent / "api.spec.yaml" + with open(spec_file) as f: + repositories = safe_load(f) + spec_file = Path(renku_data_services.platform.__file__).resolve().parent / "api.spec.yaml" with open(spec_file) as f: platform = safe_load(f) + spec_file = Path(renku_data_services.data_connectors.__file__).resolve().parent / "api.spec.yaml" + with open(spec_file) as f: + data_connectors = safe_load(f) + self.spec = merge_api_specs( crc_spec, storage_spec, @@ -222,6 +237,7 @@ def __post_init__(self) -> None: connected_services, repositories, platform, + data_connectors, ) if self.default_resource_pool_file is not None: @@ -266,18 +282,6 @@ def storage_repo(self) -> StorageRepository: ) return self._storage_repo - @property - def storage_v2_repo(self) -> StorageV2Repository: - """The DB adapter for V2 cloud storage configs.""" - if not self._storage_v2_repo: - self._storage_v2_repo = StorageV2Repository( - session_maker=self.db.async_session_maker, - project_authz=self.authz, - user_repo=self.kc_user_repo, - secret_service_public_key=self.secrets_service_public_key, - ) - return self._storage_v2_repo - @property def event_repo(self) -> EventRepository: """The DB adapter for cloud event configs.""" @@ -398,6 +402,27 @@ def platform_repo(self) -> PlatformRepository: ) return self._platform_repo + @property + def data_connector_repo(self) -> DataConnectorRepository: + """The DB adapter for data connectors.""" + if not self._data_connector_repo: + self._data_connector_repo = DataConnectorRepository( + session_maker=self.db.async_session_maker, + authz=self.authz, + user_repo=self.kc_user_repo, + secret_service_public_key=self.secrets_service_public_key, + ) + return 
self._data_connector_repo + + @property + def data_connector_to_project_link_repo(self) -> DataConnectorProjectLinkRepository: + """The DB adapter for data connector to project links.""" + if not self._data_connector_to_project_link_repo: + self._data_connector_to_project_link_repo = DataConnectorProjectLinkRepository( + session_maker=self.db.async_session_maker, authz=self.authz + ) + return self._data_connector_to_project_link_repo + @classmethod def from_env(cls, prefix: str = "") -> "Config": """Create a config from environment variables.""" @@ -408,8 +433,8 @@ def from_env(cls, prefix: str = "") -> "Config": gitlab_client: base_models.GitlabAPIProtocol user_preferences_config: UserPreferencesConfig version = os.environ.get(f"{prefix}VERSION", "0.0.1") - server_options_file = os.environ.get("SERVER_OPTIONS") - server_defaults_file = os.environ.get("SERVER_DEFAULTS") + server_options_file = os.environ.get("NB_SERVER_OPTIONS__UI_CHOICES_PATH") + server_defaults_file = os.environ.get("NB_SERVER_OPTIONS__DEFAULTS_PATH") k8s_namespace = os.environ.get("K8S_NAMESPACE", "default") max_pinned_projects = int(os.environ.get(f"{prefix}MAX_PINNED_PROJECTS", "10")) user_preferences_config = UserPreferencesConfig(max_pinned_projects=max_pinned_projects) @@ -491,6 +516,7 @@ def from_env(cls, prefix: str = "") -> "Config": sentry = SentryConfig.from_env(prefix) trusted_proxies = TrustedProxiesConfig.from_env(prefix) message_queue = RedisQueue(redis) + nb_config = NotebooksConfig.from_env(db) return cls( version=version, @@ -511,4 +537,5 @@ def from_env(cls, prefix: str = "") -> "Config": encryption_key=encryption_key, secrets_service_public_key=secrets_service_public_key, gitlab_url=gitlab_url, + nb_config=nb_config, ) diff --git a/components/renku_data_services/authn/dummy.py b/components/renku_data_services/authn/dummy.py index 6da7b09b9..d10acedb0 100644 --- a/components/renku_data_services/authn/dummy.py +++ b/components/renku_data_services/authn/dummy.py @@ -7,6 +7,7 @@ from typing import Optional from sanic import Request +from ulid import ULID import renku_data_services.base_models as base_models @@ -39,10 +40,22 @@ class DummyAuthenticator: """ token_field = "Authorization" # nosec: B105 + anon_id_header_key: str = "Renku-Auth-Anon-Id" + anon_id_cookie_name: str = "Renku-Auth-Anon-Id" - @staticmethod - async def authenticate(access_token: str, request: Request) -> base_models.APIUser: + async def authenticate(self, access_token: str, request: Request) -> base_models.APIUser: """Indicates whether the user has successfully logged in.""" + access_token = request.headers.get(self.token_field) or "" + if not access_token or len(access_token) == 0: + # Try to get an anonymous user ID if the validation of keycloak credentials failed + anon_id = request.headers.get(self.anon_id_header_key) + if anon_id is None: + anon_id = request.cookies.get(self.anon_id_cookie_name) + if anon_id is None: + anon_id = f"anon-{str(ULID())}" + return base_models.AnonymousAPIUser(id=str(anon_id)) + + access_token = access_token.removeprefix("Bearer ").removeprefix("bearer ") user_props = {} with contextlib.suppress(Exception): user_props = json.loads(access_token) @@ -64,4 +77,5 @@ async def authenticate(access_token: str, request: Request) -> base_models.APIUs last_name=user_props.get("last_name", "Doe") if is_set else None, email=user_props.get("email", "john.doe@gmail.com") if is_set else None, full_name=user_props.get("full_name", "John Doe") if is_set else None, + 
refresh_token=request.headers.get("Renku-Auth-Refresh-Token"), ) diff --git a/components/renku_data_services/authn/gitlab.py b/components/renku_data_services/authn/gitlab.py index 6b8436a17..2a7d4cc83 100644 --- a/components/renku_data_services/authn/gitlab.py +++ b/components/renku_data_services/authn/gitlab.py @@ -2,10 +2,13 @@ import contextlib import urllib.parse as parse +from contextlib import suppress from dataclasses import dataclass +from datetime import datetime import gitlab from sanic import Request +from sanic.compat import Header import renku_data_services.base_models as base_models from renku_data_services import errors @@ -23,6 +26,7 @@ class GitlabAuthenticator: gitlab_url: str token_field: str = "Gitlab-Access-Token" + expires_at_field: str = "Gitlab-Access-Token-Expires-At" def __post_init__(self) -> None: """Properly set gitlab url.""" @@ -36,10 +40,10 @@ async def authenticate(self, access_token: str, request: Request) -> base_models if self.token_field != "Authorization": # nosec: B105 access_token = str(request.headers.get(self.token_field)) - result = await self._get_gitlab_api_user(access_token) + result = await self._get_gitlab_api_user(access_token, request.headers) return result - async def _get_gitlab_api_user(self, access_token: str) -> base_models.APIUser: + async def _get_gitlab_api_user(self, access_token: str, headers: Header) -> base_models.APIUser: """Get and validate a Gitlab API User.""" client = gitlab.Gitlab(self.gitlab_url, oauth_token=access_token) try: @@ -69,12 +73,18 @@ async def _get_gitlab_api_user(self, access_token: str) -> base_models.APIUser: if len(name_parts) >= 1: last_name = " ".join(name_parts) + expires_at: datetime | None = None + expires_at_raw: str | None = headers.get(self.expires_at_field) + if expires_at_raw is not None and len(expires_at_raw) > 0: + with suppress(ValueError): + expires_at = datetime.fromtimestamp(float(expires_at_raw)) + return base_models.APIUser( - is_admin=False, id=str(user_id), access_token=access_token, first_name=first_name, last_name=last_name, email=email, full_name=full_name, + access_token_expires_at=expires_at, ) diff --git a/components/renku_data_services/authn/keycloak.py b/components/renku_data_services/authn/keycloak.py index 9c71fc728..bdca34708 100644 --- a/components/renku_data_services/authn/keycloak.py +++ b/components/renku_data_services/authn/keycloak.py @@ -1,15 +1,19 @@ """Keycloak user store.""" +from contextlib import suppress from dataclasses import dataclass +from datetime import datetime from typing import Any, Optional, cast import httpx import jwt from jwt import PyJWKClient from sanic import Request +from ulid import ULID import renku_data_services.base_models as base_models from renku_data_services import errors +from renku_data_services.base_models.core import Authenticator from renku_data_services.utils.core import get_ssl_context @@ -34,44 +38,83 @@ async def get_user_by_id(self, id: str, access_token: str) -> Optional[base_mode @dataclass -class KeycloakAuthenticator: +class KeycloakAuthenticator(Authenticator): """Authenticator for JWT access tokens from Keycloak.""" jwks: PyJWKClient algorithms: list[str] admin_role: str = "renku-admin" token_field: str = "Authorization" + refresh_token_header: str = "Renku-Auth-Refresh-Token" + anon_id_header_key: str = "Renku-Auth-Anon-Id" + anon_id_cookie_name: str = "Renku-Auth-Anon-Id" def __post_init__(self) -> None: if len(self.algorithms) == 0: raise errors.ConfigurationError(message="At least one algorithm for token validation 
has to be specified.") def _validate(self, token: str) -> dict[str, Any]: - sk = self.jwks.get_signing_key_from_jwt(token) - return cast( - dict[str, Any], - jwt.decode( - token, - key=sk.key, - algorithms=self.algorithms, - audience=["renku", "renku-ui", "renku-cli", "swagger"], - verify=True, - ), - ) - - async def authenticate(self, access_token: str, request: Request) -> base_models.APIUser: + try: + sk = self.jwks.get_signing_key_from_jwt(token) + return cast( + dict[str, Any], + jwt.decode( + token, + key=sk.key, + algorithms=self.algorithms, + audience=["renku", "renku-ui", "renku-cli", "swagger"], + verify=True, + ), + ) + except (jwt.InvalidSignatureError, jwt.MissingRequiredClaimError): + # NOTE: the above errors are subclasses of `InvalidToken` below but they will result from keycloak + # misconfiguration most often rather than from the user having done something so we surface them. + raise + except jwt.InvalidTokenError: + raise errors.UnauthorizedError( + message="Your credentials are invalid or expired, please log in again.", quiet=True + ) + + async def authenticate( + self, access_token: str, request: Request + ) -> base_models.AuthenticatedAPIUser | base_models.AnonymousAPIUser: """Checks the validity of the access token.""" - if self.token_field != "Authorization": # nosec: B105 - access_token = str(request.headers.get(self.token_field)) - - parsed = self._validate(access_token) - is_admin = self.admin_role in parsed.get("realm_access", {}).get("roles", []) - return base_models.APIUser( - is_admin=is_admin, - id=parsed.get("sub"), - access_token=access_token, - full_name=parsed.get("name"), - first_name=parsed.get("given_name"), - last_name=parsed.get("family_name"), - email=parsed.get("email"), - ) + header_value = str(request.headers.get(self.token_field)) + refresh_token = request.headers.get(self.refresh_token_header) + user: base_models.AuthenticatedAPIUser | base_models.AnonymousAPIUser | None = None + + # Try to get the authorization header for a fully authenticated user + with suppress(errors.UnauthorizedError, jwt.InvalidTokenError): + token = str(header_value).removeprefix("Bearer ").removeprefix("bearer ") + parsed = self._validate(token) + is_admin = self.admin_role in parsed.get("realm_access", {}).get("roles", []) + exp = parsed.get("exp") + id = parsed.get("sub") + email = parsed.get("email") + if id is None or email is None: + raise errors.UnauthorizedError( + message="Your credentials are invalid or expired, please log in again.", quiet=True + ) + user = base_models.AuthenticatedAPIUser( + is_admin=is_admin, + id=id, + access_token=access_token, + full_name=parsed.get("name"), + first_name=parsed.get("given_name"), + last_name=parsed.get("family_name"), + email=email, + refresh_token=str(refresh_token) if refresh_token else None, + access_token_expires_at=datetime.fromtimestamp(exp) if exp is not None else None, + ) + if user is not None: + return user + + # Try to get an anonymous user ID if the validation of keycloak credentials failed + anon_id = request.headers.get(self.anon_id_header_key) + if anon_id is None: + anon_id = request.cookies.get(self.anon_id_cookie_name) + if anon_id is None: + anon_id = f"anon-{str(ULID())}" + user = base_models.AnonymousAPIUser(id=str(anon_id)) + + return user diff --git a/components/renku_data_services/authz/authz.py b/components/renku_data_services/authz/authz.py index 38386204c..dc072b452 100644 --- a/components/renku_data_services/authz/authz.py +++ b/components/renku_data_services/authz/authz.py @@ -32,6 +32,7 @@ 
from renku_data_services.authz.config import AuthzConfig from renku_data_services.authz.models import Change, Member, MembershipChange, Role, Scope, Visibility from renku_data_services.base_models.core import InternalServiceAdmin +from renku_data_services.data_connectors.models import DataConnector, DataConnectorToProjectLink, DataConnectorUpdate from renku_data_services.errors import errors from renku_data_services.namespace.models import Group, GroupUpdate, Namespace, NamespaceKind, NamespaceUpdate from renku_data_services.project.models import Project, ProjectUpdate @@ -51,7 +52,15 @@ def authz(self) -> "Authz": _AuthzChangeFuncResult = TypeVar( "_AuthzChangeFuncResult", - bound=Project | ProjectUpdate | Group | UserInfoUpdate | list[UserInfo] | None, + bound=Project + | ProjectUpdate + | Group + | UserInfoUpdate + | list[UserInfo] + | DataConnector + | DataConnectorUpdate + | DataConnectorToProjectLink + | None, ) _T = TypeVar("_T") _WithAuthz = TypeVar("_WithAuthz", bound=WithAuthz) @@ -87,6 +96,9 @@ class _Relation(StrEnum): group_platform: str = "group_platform" user_namespace_platform: str = "user_namespace_platform" project_namespace: str = "project_namespace" + data_connector_platform: str = "data_connector_platform" + data_connector_namespace: str = "data_connector_namespace" + linked_to: str = "linked_to" @classmethod def from_role(cls, role: Role) -> "_Relation": @@ -119,6 +131,7 @@ class ResourceType(StrEnum): platform: str = "platform" group: str = "group" user_namespace: str = "user_namespace" + data_connector: str = "data_connector" class AuthzOperation(StrEnum): @@ -129,6 +142,8 @@ class AuthzOperation(StrEnum): update: str = "update" update_or_insert: str = "update_or_insert" insert_many: str = "insert_many" + create_link: str = "create_link" + delete_link: str = "delete_link" class _AuthzConverter: @@ -170,6 +185,10 @@ def group(id: ULID) -> ObjectReference: def user_namespace(id: ULID) -> ObjectReference: return ObjectReference(object_type=ResourceType.user_namespace, object_id=str(id)) + @staticmethod + def data_connector(id: ULID) -> ObjectReference: + return ObjectReference(object_type=ResourceType.data_connector.value, object_id=str(id)) + @staticmethod def to_object(resource_type: ResourceType, resource_id: str | ULID | int) -> ObjectReference: match (resource_type, resource_id): @@ -183,6 +202,8 @@ def to_object(resource_type: ResourceType, resource_id: str | ULID | int) -> Obj return _AuthzConverter.user_namespace(rid) case (ResourceType.group, rid) if isinstance(rid, ULID): return _AuthzConverter.group(rid) + case (ResourceType.data_connector, dcid) if isinstance(dcid, ULID): + return _AuthzConverter.data_connector(dcid) raise errors.ProgrammingError( message=f"Unexpected or unknown resource type when checking permissions {resource_type}" ) @@ -215,7 +236,7 @@ async def decorated_function( message="The authorization decorator needs to have at least one positional argument after 'user'" ) potential_resource = args[0] - resource: Project | Group | Namespace | None = None + resource: Project | Group | Namespace | DataConnector | None = None match resource_type: case ResourceType.project if isinstance(potential_resource, Project): resource = potential_resource @@ -223,6 +244,8 @@ async def decorated_function( resource = potential_resource case ResourceType.user_namespace if isinstance(potential_resource, Namespace): resource = potential_resource + case ResourceType.data_connector if isinstance(potential_resource, DataConnector): + resource = potential_resource case 
_:
                    raise errors.ProgrammingError(
                        message="The decorator for checking permissions for authorization database operations "
@@ -504,11 +527,40 @@ async def _get_authz_change(
                                f"database updates for inserting namespaces but found {type(res)}"
                            )
                        authz_change.extend(db_repo.authz._add_user_namespace(res.namespace))
+            case AuthzOperation.create, ResourceType.data_connector if isinstance(result, DataConnector):
+                authz_change = db_repo.authz._add_data_connector(result)
+            case AuthzOperation.delete, ResourceType.data_connector if result is None:
+                # NOTE: This means that the data connector does not exist in the first place so nothing was deleted
+                pass
+            case AuthzOperation.delete, ResourceType.data_connector if isinstance(result, DataConnector):
+                user = _extract_user_from_args(*func_args, **func_kwargs)
+                authz_change = await db_repo.authz._remove_data_connector(user, result)
+            case AuthzOperation.update, ResourceType.data_connector if isinstance(result, DataConnectorUpdate):
+                authz_change = _AuthzChange()
+                if result.old.visibility != result.new.visibility:
+                    user = _extract_user_from_args(*func_args, **func_kwargs)
+                    authz_change.extend(await db_repo.authz._update_data_connector_visibility(user, result.new))
+                if result.old.namespace.id != result.new.namespace.id:
+                    user = _extract_user_from_args(*func_args, **func_kwargs)
+                    authz_change.extend(await db_repo.authz._update_data_connector_namespace(user, result.new))
+            case AuthzOperation.create_link, ResourceType.data_connector if isinstance(
+                result, DataConnectorToProjectLink
+            ):
+                user = _extract_user_from_args(*func_args, **func_kwargs)
+                authz_change = await db_repo.authz._add_data_connector_to_project_link(user, result)
+            case AuthzOperation.delete_link, ResourceType.data_connector if result is None:
+                # NOTE: This means that the link does not exist in the first place so nothing was deleted
+                pass
+            case AuthzOperation.delete_link, ResourceType.data_connector if isinstance(
+                result, DataConnectorToProjectLink
+            ):
+                user = _extract_user_from_args(*func_args, **func_kwargs)
+                authz_change = await db_repo.authz._remove_data_connector_to_project_link(user, result)
             case _:
                 resource_id: str | ULID | None = "unknown"
-                if isinstance(result, (Project, Namespace, Group)):
+                if isinstance(result, (Project, Namespace, Group, DataConnector)):
                     resource_id = result.id
-                elif isinstance(result, (ProjectUpdate, NamespaceUpdate, GroupUpdate)):
+                elif isinstance(result, (ProjectUpdate, NamespaceUpdate, GroupUpdate, DataConnectorUpdate)):
                     resource_id = result.new.id
                 raise errors.ProgrammingError(
                     message=f"Encountered an unknown authorization operation {op} on resource {resource} "
@@ -597,12 +649,12 @@ def _add_project(self, project: Project) -> _AuthzChange:
         if project.visibility == Visibility.PUBLIC:
             all_users_are_viewers = Relationship(
                 resource=project_res,
-                relation=_Relation.viewer.value,
+                relation=_Relation.public_viewer.value,
                 subject=all_users,
             )
             all_anon_users_are_viewers = Relationship(
                 resource=project_res,
-                relation=_Relation.viewer.value,
+                relation=_Relation.public_viewer.value,
                 subject=all_anon_users,
             )
             relationships.extend([all_users_are_viewers, all_anon_users_are_viewers])
@@ -652,12 +704,12 @@ async def _update_project_visibility(
         anon_users_sub = SubjectReference(object=_AuthzConverter.anonymous_users())
         all_users_are_viewers = Relationship(
             resource=project_res,
-            relation=_Relation.viewer.value,
+            relation=_Relation.public_viewer.value,
             subject=all_users_sub,
         )
         anon_users_are_viewers = Relationship(
             resource=project_res,
-
relation=_Relation.viewer.value, + relation=_Relation.public_viewer.value, subject=anon_users_sub, ) make_public = WriteRelationshipsRequest( @@ -1358,3 +1410,306 @@ async def _remove_user_namespace(self, user_id: str, zed_token: ZedToken | None updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=i) for i in rels] ) return _AuthzChange(apply=apply, undo=undo) + + def _add_data_connector(self, data_connector: DataConnector) -> _AuthzChange: + """Create the new data connector and associated resources and relations in the DB.""" + creator = SubjectReference(object=_AuthzConverter.user(data_connector.created_by)) + data_connector_res = _AuthzConverter.data_connector(data_connector.id) + creator_is_owner = Relationship(resource=data_connector_res, relation=_Relation.owner.value, subject=creator) + all_users = SubjectReference(object=_AuthzConverter.all_users()) + all_anon_users = SubjectReference(object=_AuthzConverter.anonymous_users()) + data_connector_namespace = SubjectReference( + object=_AuthzConverter.user_namespace(data_connector.namespace.id) + if data_connector.namespace.kind == NamespaceKind.user + else _AuthzConverter.group(cast(ULID, data_connector.namespace.underlying_resource_id)) + ) + data_connector_in_platform = Relationship( + resource=data_connector_res, + relation=_Relation.data_connector_platform, + subject=SubjectReference(object=self._platform), + ) + data_connector_in_namespace = Relationship( + resource=data_connector_res, relation=_Relation.data_connector_namespace, subject=data_connector_namespace + ) + relationships = [creator_is_owner, data_connector_in_platform, data_connector_in_namespace] + if data_connector.visibility == Visibility.PUBLIC: + all_users_are_viewers = Relationship( + resource=data_connector_res, + relation=_Relation.public_viewer.value, + subject=all_users, + ) + all_anon_users_are_viewers = Relationship( + resource=data_connector_res, + relation=_Relation.public_viewer.value, + subject=all_anon_users, + ) + relationships.extend([all_users_are_viewers, all_anon_users_are_viewers]) + apply = WriteRelationshipsRequest( + updates=[ + RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=i) for i in relationships + ] + ) + undo = WriteRelationshipsRequest( + updates=[ + RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=i) for i in relationships + ] + ) + return _AuthzChange(apply=apply, undo=undo) + + @_is_allowed_on_resource(Scope.DELETE, ResourceType.data_connector) + async def _remove_data_connector( + self, user: base_models.APIUser, data_connector: DataConnector, *, zed_token: ZedToken | None = None + ) -> _AuthzChange: + """Remove the relationships associated with the data connector.""" + consistency = Consistency(at_least_as_fresh=zed_token) if zed_token else Consistency(fully_consistent=True) + rel_filter = RelationshipFilter( + resource_type=ResourceType.data_connector.value, optional_resource_id=str(data_connector.id) + ) + responses: AsyncIterable[ReadRelationshipsResponse] = self.client.ReadRelationships( + ReadRelationshipsRequest(consistency=consistency, relationship_filter=rel_filter) + ) + rels: list[Relationship] = [] + async for response in responses: + rels.append(response.relationship) + apply = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=i) for i in rels] + ) + undo = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, 
relationship=i) for i in rels]
+        )
+        return _AuthzChange(apply=apply, undo=undo)
+
+    # NOTE changing visibility is the same access level as removal
+    @_is_allowed_on_resource(Scope.DELETE, ResourceType.data_connector)
+    async def _update_data_connector_visibility(
+        self, user: base_models.APIUser, data_connector: DataConnector, *, zed_token: ZedToken | None = None
+    ) -> _AuthzChange:
+        """Update the visibility of the data connector in the authorization database."""
+        data_connector_id_str = str(data_connector.id)
+        consistency = Consistency(at_least_as_fresh=zed_token) if zed_token else Consistency(fully_consistent=True)
+        data_connector_res = _AuthzConverter.data_connector(data_connector.id)
+        all_users_sub = SubjectReference(object=_AuthzConverter.all_users())
+        anon_users_sub = SubjectReference(object=_AuthzConverter.anonymous_users())
+        all_users_are_viewers = Relationship(
+            resource=data_connector_res,
+            relation=_Relation.public_viewer.value,
+            subject=all_users_sub,
+        )
+        anon_users_are_viewers = Relationship(
+            resource=data_connector_res,
+            relation=_Relation.public_viewer.value,
+            subject=anon_users_sub,
+        )
+        make_public = WriteRelationshipsRequest(
+            updates=[
+                RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=all_users_are_viewers),
+                RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=anon_users_are_viewers),
+            ]
+        )
+        make_private = WriteRelationshipsRequest(
+            updates=[
+                RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=all_users_are_viewers),
+                RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=anon_users_are_viewers),
+            ]
+        )
+        rel_filter = RelationshipFilter(
+            resource_type=ResourceType.data_connector.value,
+            optional_resource_id=data_connector_id_str,
+            optional_subject_filter=SubjectFilter(
+                subject_type=ResourceType.user.value, optional_subject_id=all_users_sub.object.object_id
+            ),
+        )
+        current_relation_users: ReadRelationshipsResponse | None = await anext(
+            aiter(
+                self.client.ReadRelationships(
+                    ReadRelationshipsRequest(consistency=consistency, relationship_filter=rel_filter)
+                )
+            ),
+            None,
+        )
+        rel_filter = RelationshipFilter(
+            resource_type=ResourceType.data_connector.value,
+            optional_resource_id=data_connector_id_str,
+            optional_subject_filter=SubjectFilter(
+                subject_type=ResourceType.anonymous_user.value,
+                optional_subject_id=anon_users_sub.object.object_id,
+            ),
+        )
+        current_relation_anon_users: ReadRelationshipsResponse | None = await anext(
+            aiter(
+                self.client.ReadRelationships(
+                    ReadRelationshipsRequest(consistency=consistency, relationship_filter=rel_filter)
+                )
+            ),
+            None,
+        )
+        data_connector_is_public_for_users = (
+            current_relation_users is not None
+            and current_relation_users.relationship.subject.object.object_type == ResourceType.user.value
+            and current_relation_users.relationship.subject.object.object_id == all_users_sub.object.object_id
+        )
+        data_connector_is_public_for_anon_users = (
+            current_relation_anon_users is not None
+            and current_relation_anon_users.relationship.subject.object.object_type == ResourceType.anonymous_user.value
+            and current_relation_anon_users.relationship.subject.object.object_id == anon_users_sub.object.object_id
+        )
+        data_connector_already_public = data_connector_is_public_for_users and data_connector_is_public_for_anon_users
+        data_connector_already_private = not data_connector_already_public
+        match data_connector.visibility:
+            case Visibility.PUBLIC:
+                if data_connector_already_public:
+ return _AuthzChange(apply=WriteRelationshipsRequest(), undo=WriteRelationshipsRequest()) + return _AuthzChange(apply=make_public, undo=make_private) + case Visibility.PRIVATE: + if data_connector_already_private: + return _AuthzChange(apply=WriteRelationshipsRequest(), undo=WriteRelationshipsRequest()) + return _AuthzChange(apply=make_private, undo=make_public) + raise errors.ProgrammingError( + message=f"Encountered unknown data connector visibility {data_connector.visibility} when trying to " + f"make a visibility change for data connector with ID {data_connector.id}", + ) + + # NOTE changing namespace is the same access level as removal + @_is_allowed_on_resource(Scope.DELETE, ResourceType.data_connector) + async def _update_data_connector_namespace( + self, user: base_models.APIUser, data_connector: DataConnector, *, zed_token: ZedToken | None = None + ) -> _AuthzChange: + """Update the namespace of the data connector in the authorization database.""" + consistency = Consistency(at_least_as_fresh=zed_token) if zed_token else Consistency(fully_consistent=True) + data_connector_res = _AuthzConverter.data_connector(data_connector.id) + data_connector_filter = RelationshipFilter( + resource_type=ResourceType.data_connector.value, + optional_resource_id=str(data_connector.id), + optional_relation=_Relation.data_connector_namespace.value, + ) + current_namespace: ReadRelationshipsResponse | None = await anext( + aiter( + self.client.ReadRelationships( + ReadRelationshipsRequest(relationship_filter=data_connector_filter, consistency=consistency) + ) + ), + None, + ) + if not current_namespace: + raise errors.ProgrammingError( + message=f"The data connector with ID {data_connector.id} whose namespace is being updated " + "does not currently have a namespace." 
+ ) + if current_namespace.relationship.subject.object.object_id == data_connector.namespace.id: + return _AuthzChange() + new_namespace_sub = ( + SubjectReference(object=_AuthzConverter.group(data_connector.namespace.id)) + if data_connector.namespace.kind == NamespaceKind.group + else SubjectReference(object=_AuthzConverter.user_namespace(data_connector.namespace.id)) + ) + old_namespace_sub = ( + SubjectReference( + object=_AuthzConverter.group(ULID.from_str(current_namespace.relationship.subject.object.object_id)) + ) + if current_namespace.relationship.subject.object.object_type == ResourceType.group.value + else SubjectReference( + object=_AuthzConverter.user_namespace( + ULID.from_str(current_namespace.relationship.subject.object.object_id) + ) + ) + ) + new_namespace = Relationship( + resource=data_connector_res, + relation=_Relation.data_connector_namespace.value, + subject=new_namespace_sub, + ) + old_namespace = Relationship( + resource=data_connector_res, + relation=_Relation.data_connector_namespace.value, + subject=old_namespace_sub, + ) + apply_change = WriteRelationshipsRequest( + updates=[ + RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=new_namespace), + ] + ) + undo_change = WriteRelationshipsRequest( + updates=[ + RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=old_namespace), + ] + ) + return _AuthzChange(apply=apply_change, undo=undo_change) + + async def _add_data_connector_to_project_link( + self, user: base_models.APIUser, link: DataConnectorToProjectLink + ) -> _AuthzChange: + """Links a data connector to a project.""" + # NOTE: we manually check for permissions here since it is not trivially expressed through decorators + allowed_from = await self.has_permission( + user, ResourceType.data_connector, link.data_connector_id, Scope.ADD_LINK + ) + if not allowed_from: + raise errors.MissingResourceError( + message=f"The user with ID {user.id} cannot perform operation {Scope.ADD_LINK} " + f"on {ResourceType.data_connector.value} " + f"with ID {link.data_connector_id} or the resource does not exist." + ) + allowed_to = await self.has_permission(user, ResourceType.project, link.project_id, Scope.WRITE) + if not allowed_to: + raise errors.MissingResourceError( + message=f"The user with ID {user.id} cannot perform operation {Scope.WRITE} " + f"on {ResourceType.project.value} " + f"with ID {link.project_id} or the resource does not exist." 
+ ) + + data_connector_res = _AuthzConverter.data_connector(link.data_connector_id) + project_subject = SubjectReference(object=_AuthzConverter.project(link.project_id)) + relationship = Relationship( + resource=data_connector_res, + relation=_Relation.linked_to.value, + subject=project_subject, + ) + apply = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=relationship)] + ) + undo = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=relationship)] + ) + change = _AuthzChange( + apply=apply, + undo=undo, + ) + return change + + async def _remove_data_connector_to_project_link( + self, user: base_models.APIUser, link: DataConnectorToProjectLink + ) -> _AuthzChange: + """Remove the relationships associated with the link from a data connector to a project.""" + # NOTE: we manually check for permissions here since it is not trivially expressed through decorators + allowed_from = await self.has_permission( + user, ResourceType.data_connector, link.data_connector_id, Scope.DELETE + ) + allowed_to, zed_token = await self._has_permission(user, ResourceType.project, link.project_id, Scope.WRITE) + allowed = allowed_from or allowed_to + if not allowed: + raise errors.MissingResourceError( + message=f"The user with ID {user.id} cannot perform operation {AuthzOperation.delete_link}" + f"on the data connector to project link with ID {link.id} or the resource does not exist." + ) + consistency = Consistency(at_least_as_fresh=zed_token) if zed_token else Consistency(fully_consistent=True) + rel_filter = RelationshipFilter( + resource_type=ResourceType.data_connector.value, + optional_resource_id=str(link.data_connector_id), + optional_relation=_Relation.linked_to.value, + optional_subject_filter=SubjectFilter( + subject_type=ResourceType.project.value, optional_subject_id=str(link.project_id) + ), + ) + responses: AsyncIterable[ReadRelationshipsResponse] = self.client.ReadRelationships( + ReadRelationshipsRequest(consistency=consistency, relationship_filter=rel_filter) + ) + rels: list[Relationship] = [] + async for response in responses: + rels.append(response.relationship) + apply = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_DELETE, relationship=i) for i in rels] + ) + undo = WriteRelationshipsRequest( + updates=[RelationshipUpdate(operation=RelationshipUpdate.OPERATION_TOUCH, relationship=i) for i in rels] + ) + return _AuthzChange(apply=apply, undo=undo) diff --git a/components/renku_data_services/authz/models.py b/components/renku_data_services/authz/models.py index 171d848b9..6f13def4e 100644 --- a/components/renku_data_services/authz/models.py +++ b/components/renku_data_services/authz/models.py @@ -50,6 +50,7 @@ class Scope(Enum): DELETE: str = "delete" CHANGE_MEMBERSHIP: str = "change_membership" READ_CHILDREN: str = "read_children" + ADD_LINK: str = "add_link" @dataclass diff --git a/components/renku_data_services/authz/schemas.py b/components/renku_data_services/authz/schemas.py index e43cd1b6e..94786267f 100644 --- a/components/renku_data_services/authz/schemas.py +++ b/components/renku_data_services/authz/schemas.py @@ -3,10 +3,12 @@ These are applied through alembic migrations in the common migrations folder. 
""" +from collections.abc import Iterable from dataclasses import dataclass +from typing import cast from authzed.api.v1 import SyncClient -from authzed.api.v1.core_pb2 import SubjectReference +from authzed.api.v1.core_pb2 import Relationship, RelationshipUpdate, SubjectReference from authzed.api.v1.permission_service_pb2 import ( DeleteRelationshipsRequest, DeleteRelationshipsResponse, @@ -15,6 +17,7 @@ WriteRelationshipsRequest, ) from authzed.api.v1.schema_service_pb2 import WriteSchemaRequest, WriteSchemaResponse +from ulid import ULID from renku_data_services.authz.authz import ResourceType, _AuthzConverter, _Relation from renku_data_services.errors import errors @@ -45,7 +48,7 @@ def _write_to_db( output.append(res) case _: raise errors.ProgrammingError( - message=f"Found an uknown authorization migration type {type(request)}" + message=f"Found an unknown authorization migration type {type(request)}" ) return output @@ -281,3 +284,156 @@ def downgrade( WriteSchemaRequest(schema=_v2), ], ) + +_v4: str = """\ +definition user {} + +definition group { + relation group_platform: platform + relation owner: user + relation editor: user + relation viewer: user + relation public_viewer: user:* | anonymous_user:* + permission read = public_viewer + read_children + permission read_children = viewer + write + permission write = editor + delete + permission change_membership = delete + permission delete = owner + group_platform->is_admin +} + +definition user_namespace { + relation user_namespace_platform: platform + relation owner: user + relation public_viewer: user:* | anonymous_user:* + permission read = public_viewer + read_children + permission read_children = delete + permission write = delete + permission delete = owner + user_namespace_platform->is_admin +} + +definition anonymous_user {} + +definition platform { + relation admin: user + permission is_admin = admin +} + +definition project { + relation project_platform: platform + relation project_namespace: user_namespace | group + relation owner: user + relation editor: user + relation viewer: user + relation public_viewer: user:* | anonymous_user:* + permission read = public_viewer + viewer + write + project_namespace->read_children + permission read_linked_resources = viewer + editor + owner + project_platform->is_admin + permission write = editor + delete + project_namespace->write + permission change_membership = delete + permission delete = owner + project_platform->is_admin + project_namespace->delete +} + +definition data_connector { + relation data_connector_platform: platform + relation data_connector_namespace: user_namespace | group + relation linked_to: project + relation owner: user + relation editor: user + relation viewer: user + relation public_viewer: user:* | anonymous_user:* + permission read = public_viewer + viewer + write + \ + data_connector_namespace->read_children + read_from_linked_resource + permission read_from_linked_resource = linked_to->read_linked_resources + permission write = editor + delete + data_connector_namespace->write + permission change_membership = delete + permission delete = owner + data_connector_platform->is_admin + data_connector_namespace->delete + permission add_link = write + public_viewer +}""" + + +def generate_v4(public_project_ids: Iterable[str]) -> AuthzSchemaMigration: + """Creates the v4 schema migration.""" + up: list[WriteRelationshipsRequest | DeleteRelationshipsRequest | WriteSchemaRequest] = [ + DeleteRelationshipsRequest( + relationship_filter=RelationshipFilter( + 
resource_type=ResourceType.project.value, + optional_relation=_Relation.viewer.value, + optional_subject_filter=SubjectFilter( + subject_type=ResourceType.user.value, + optional_subject_id=SubjectReference(object=_AuthzConverter.all_users()).object.object_id, + ), + ) + ), + DeleteRelationshipsRequest( + relationship_filter=RelationshipFilter( + resource_type=ResourceType.project.value, + optional_relation=_Relation.viewer.value, + optional_subject_filter=SubjectFilter( + subject_type=ResourceType.anonymous_user.value, + optional_subject_id=SubjectReference(object=_AuthzConverter.anonymous_users()).object.object_id, + ), + ) + ), + WriteSchemaRequest(schema=_v4), + ] + down: list[WriteRelationshipsRequest | DeleteRelationshipsRequest | WriteSchemaRequest] = [ + DeleteRelationshipsRequest( + relationship_filter=RelationshipFilter( + resource_type=ResourceType.project.value, optional_relation=_Relation.public_viewer.value + ) + ), + DeleteRelationshipsRequest( + relationship_filter=RelationshipFilter(resource_type=ResourceType.data_connector.value) + ), + WriteSchemaRequest(schema=_v3), + ] + + all_users_sub = SubjectReference(object=_AuthzConverter.all_users()) + anon_users_sub = SubjectReference(object=_AuthzConverter.anonymous_users()) + for project_id in public_project_ids: + project_res = _AuthzConverter.project(cast(ULID, ULID.from_str(project_id))) + all_users_are_viewers = Relationship( + resource=project_res, + relation=_Relation.public_viewer.value, + subject=all_users_sub, + ) + anon_users_are_viewers = Relationship( + resource=project_res, + relation=_Relation.public_viewer.value, + subject=anon_users_sub, + ) + down_all_users_are_viewers = Relationship( + resource=project_res, + relation=_Relation.viewer.value, + subject=all_users_sub, + ) + down_anon_users_are_viewers = Relationship( + resource=project_res, + relation=_Relation.viewer.value, + subject=anon_users_sub, + ) + up.append( + WriteRelationshipsRequest( + updates=[ + RelationshipUpdate( + operation=RelationshipUpdate.OPERATION_TOUCH, relationship=all_users_are_viewers + ), + RelationshipUpdate( + operation=RelationshipUpdate.OPERATION_TOUCH, relationship=anon_users_are_viewers + ), + ], + ) + ) + down.append( + WriteRelationshipsRequest( + updates=[ + RelationshipUpdate( + operation=RelationshipUpdate.OPERATION_TOUCH, relationship=down_all_users_are_viewers + ), + RelationshipUpdate( + operation=RelationshipUpdate.OPERATION_TOUCH, relationship=down_anon_users_are_viewers + ), + ], + ) + ) + + return AuthzSchemaMigration(up=up, down=down) diff --git a/components/renku_data_services/base_api/auth.py b/components/renku_data_services/base_api/auth.py index a16825550..f468296f4 100644 --- a/components/renku_data_services/base_api/auth.py +++ b/components/renku_data_services/base_api/auth.py @@ -1,14 +1,15 @@ """Authentication decorators for Sanic.""" +import asyncio import re -from collections.abc import Awaitable, Callable, Coroutine +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar, cast from sanic import Request from renku_data_services import errors -from renku_data_services.base_models import APIUser, Authenticator +from renku_data_services.base_models import AnyAPIUser, APIUser, Authenticator _T = TypeVar("_T") _P = ParamSpec("_P") @@ -17,7 +18,7 @@ def authenticate( authenticator: Authenticator, ) -> Callable[ - [Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]]], + [Callable[Concatenate[Request, AnyAPIUser, _P], 
Coroutine[Any, Any, _T]]], Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], ]: """Decorator for a Sanic handler that adds the APIUser model to the context. @@ -26,16 +27,12 @@ def authenticate( """ def decorator( - f: Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]], + f: Callable[Concatenate[Request, AnyAPIUser, _P], Coroutine[Any, Any, _T]], ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: @wraps(f) async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: token = request.headers.get(authenticator.token_field) - user = APIUser() - if token is not None and len(token) >= 8: - token = token.removeprefix("Bearer ").removeprefix("bearer ") - user = await authenticator.authenticate(token, request) - + user = await authenticator.authenticate(token or "", request) response = await f(request, user, *args, **kwargs) return response @@ -44,9 +41,39 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar return decorator +def authenticate_2( + authenticator1: Authenticator, + authenticator2: Authenticator, +) -> Callable[ + [Callable[Concatenate[Request, AnyAPIUser, AnyAPIUser, _P], Coroutine[Any, Any, _T]]], + Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +]: + """Decorator for a Sanic handler that adds the APIUser when another authentication has already been done.""" + + def decorator( + f: Callable[Concatenate[Request, AnyAPIUser, AnyAPIUser, _P], Coroutine[Any, Any, _T]], + ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: + @wraps(f) + async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: + token1 = request.headers.get(authenticator1.token_field) + token2 = request.headers.get(authenticator2.token_field) + user1: AnyAPIUser + user2: AnyAPIUser + [user1, user2] = await asyncio.gather( + authenticator1.authenticate(token1 or "", request), + authenticator2.authenticate(token2 or "", request), + ) + response = await f(request, user1, user2, *args, **kwargs) + return response + + return decorated_function + + return decorator + + def validate_path_project_id( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that validates the project_id path parameter.""" _path_project_id_regex = re.compile(r"^[A-Za-z0-9]{26}$") @@ -69,8 +96,8 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def validate_path_user_id( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that validates the user_id or member_id path parameter.""" _path_user_id_regex = re.compile(r"^[A-Za-z0-9]{1}[A-Za-z0-9-]+$") @@ -102,8 +129,8 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def only_admins( - f: Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, APIUser, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]]: """Decorator for a Sanic handler that errors out if the user is not an admin.""" 
@wraps(f) @@ -122,7 +149,7 @@ async def decorated_function(request: Request, user: APIUser, *args: _P.args, ** return decorated_function -def only_authenticated(f: Callable[_P, Awaitable[_T]]) -> Callable[_P, Awaitable[_T]]: +def only_authenticated(f: Callable[_P, Coroutine[Any, Any, _T]]) -> Callable[_P, Coroutine[Any, Any, _T]]: """Decorator that errors out if the user is not authenticated. It looks for APIUser in the named or unnamed parameters. @@ -153,3 +180,30 @@ async def decorated_function(*args: _P.args, **kwargs: _P.kwargs) -> _T: return response return decorated_function + + +def internal_gitlab_authenticate( + authenticator: Authenticator, +) -> Callable[ + [Callable[Concatenate[Request, APIUser, APIUser, _P], Coroutine[Any, Any, _T]]], + Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]], +]: + """Decorator for a Sanic handler that that adds a user for the internal gitlab user.""" + + def decorator( + f: Callable[Concatenate[Request, APIUser, APIUser, _P], Coroutine[Any, Any, _T]], + ) -> Callable[Concatenate[Request, APIUser, _P], Coroutine[Any, Any, _T]]: + @wraps(f) + async def decorated_function( + request: Request, + user: APIUser, + *args: _P.args, + **kwargs: _P.kwargs, + ) -> _T: + access_token = str(request.headers.get("Gitlab-Access-Token")) + internal_gitlab_user = await authenticator.authenticate(access_token, request) + return await f(request, user, internal_gitlab_user, *args, **kwargs) + + return decorated_function + + return decorator diff --git a/components/renku_data_services/base_api/blueprint.py b/components/renku_data_services/base_api/blueprint.py index f72fcdd16..3a76882c4 100644 --- a/components/renku_data_services/base_api/blueprint.py +++ b/components/renku_data_services/base_api/blueprint.py @@ -3,7 +3,7 @@ from collections.abc import Callable from dataclasses import dataclass, field from inspect import getmembers, ismethod -from typing import Optional, cast +from typing import cast from sanic import Blueprint from sanic.models.handler_types import RequestMiddlewareType, ResponseMiddlewareType, RouteHandler @@ -21,7 +21,7 @@ class CustomBlueprint: """ name: str - url_prefix: Optional[str] = None + url_prefix: str request_middlewares: list[RequestMiddlewareType] = field(default_factory=list, repr=False) response_middlewares: list[ResponseMiddlewareType] = field(default_factory=list, repr=False) diff --git a/components/renku_data_services/base_api/etag.py b/components/renku_data_services/base_api/etag.py index 7becd15ce..575869e4d 100644 --- a/components/renku_data_services/base_api/etag.py +++ b/components/renku_data_services/base_api/etag.py @@ -1,6 +1,6 @@ """Enitity tag decorators for Sanic.""" -from collections.abc import Awaitable, Callable, Coroutine +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar @@ -13,8 +13,8 @@ def if_match_required( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], -) -> Callable[Concatenate[Request, _P], Awaitable[_T]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: """Decorator that errors out if the "If-Match" header is not present.""" @wraps(f) @@ -31,7 +31,7 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar def extract_if_none_match( - f: Callable[Concatenate[Request, _P], Awaitable[_T]], + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], ) -> Callable[Concatenate[Request, _P], 
Coroutine[Any, Any, _T]]: """Decorator which extracts the "If-None-Match" header if present.""" diff --git a/components/renku_data_services/base_api/misc.py b/components/renku_data_services/base_api/misc.py index 5053c3190..2a8f9d11f 100644 --- a/components/renku_data_services/base_api/misc.py +++ b/components/renku_data_services/base_api/misc.py @@ -5,7 +5,7 @@ from functools import wraps from typing import Any, Concatenate, NoReturn, ParamSpec, TypeVar, cast -from pydantic import BaseModel +from pydantic import BaseModel, RootModel from sanic import Request, json from sanic.response import JSONResponse from sanic_ext import validate @@ -97,3 +97,28 @@ async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwar return decorated_function return decorator + + +def validate_body_root_model( + json: type[RootModel], +) -> Callable[ + [Callable[Concatenate[Request, _P], Awaitable[_T]]], + Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], +]: + """Decorator for sanic json payload validation when the model is derived from RootModel. + + Should be removed once sanic fixes this error in their validation code. + """ + + def decorator( + f: Callable[Concatenate[Request, _P], Awaitable[_T]], + ) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]]: + @wraps(f) + async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: + if request.json is not None: + request.parsed_json = {"root": request.parsed_json} # type: ignore[assignment] + return await validate(json=json)(f)(request, *args, **kwargs) + + return decorated_function + + return decorator diff --git a/components/renku_data_services/base_api/pagination.py b/components/renku_data_services/base_api/pagination.py index 4a5b0b8be..7438283a3 100644 --- a/components/renku_data_services/base_api/pagination.py +++ b/components/renku_data_services/base_api/pagination.py @@ -1,6 +1,6 @@ """Classes and decorators used for paginating long responses.""" -from collections.abc import Awaitable, Callable, Sequence +from collections.abc import Callable, Coroutine, Sequence from functools import wraps from math import ceil from typing import Any, Concatenate, NamedTuple, ParamSpec, cast @@ -57,8 +57,8 @@ def as_header(self) -> dict[str, str]: def paginate( - f: Callable[Concatenate[Request, _P], Awaitable[tuple[Sequence[Any], int]]], -) -> Callable[Concatenate[Request, _P], Awaitable[JSONResponse]]: + f: Callable[Concatenate[Request, _P], Coroutine[Any, Any, tuple[Sequence[Any], int]]], +) -> Callable[Concatenate[Request, _P], Coroutine[Any, Any, JSONResponse]]: """Serializes the response to JSON and adds the required pagination headers to the response. The handler should return first the list of items and then the total count from the DB. 
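Note (not part of this diff): the decorators touched above are meant to be stacked on Sanic handlers. A minimal usage sketch of the intended composition, assuming an `authenticator` object that satisfies the `Authenticator` protocol and a hypothetical `thing_repo` repository (both placeholders introduced only for illustration):

from typing import Any

from sanic import Request

from renku_data_services import base_models
from renku_data_services.base_api.auth import authenticate
from renku_data_services.base_api.pagination import PaginationRequest, paginate


@authenticate(authenticator)  # resolves the caller into an APIUser (possibly anonymous)
@paginate  # handler returns (items, total); the decorator serializes to JSON and adds pagination headers
async def list_things(
    _: Request,
    user: base_models.APIUser,
    pagination: PaginationRequest,
) -> tuple[list[dict[str, Any]], int]:
    # `thing_repo` is a hypothetical repository used only to show the expected return shape
    things, total = await thing_repo.get_things(user=user, pagination=pagination)
    return [t.dump() for t in things], total

The same pattern appears later in this diff in the data connectors blueprint, where `validate_query` and `validate_body_root_model` are layered between `authenticate` and the handler in the same way.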
diff --git a/components/renku_data_services/base_models/core.py b/components/renku_data_services/base_models/core.py index b0143d238..fe4f32fe7 100644 --- a/components/renku_data_services/base_models/core.py +++ b/components/renku_data_services/base_models/core.py @@ -3,41 +3,72 @@ import re import unicodedata from dataclasses import dataclass, field +from datetime import datetime from enum import Enum, StrEnum -from typing import ClassVar, Optional, Protocol, Self +from typing import ClassVar, Optional, Protocol, Self, TypeVar from sanic import Request from renku_data_services.errors import errors -class Authenticator(Protocol): - """Interface for authenticating users.""" - - token_field: str - - async def authenticate(self, access_token: str, request: Request) -> "APIUser": - """Validates the user credentials (i.e. we can say that the user is a valid Renku user).""" - ... - - -@dataclass(kw_only=True) +@dataclass(kw_only=True, frozen=True) class APIUser: """The model for a user of the API, used for authentication.""" + id: str | None = None # the sub claim in the access token - i.e. the Keycloak user ID + access_token: str | None = field(repr=False, default=None) + refresh_token: str | None = field(repr=False, default=None) + full_name: str | None = None + first_name: str | None = None + last_name: str | None = None + email: str | None = None + access_token_expires_at: datetime | None = None is_admin: bool = False - id: Optional[str] = None # the sub claim in the access token - i.e. the Keycloak user ID - access_token: Optional[str] = field(repr=False, default=None) - full_name: Optional[str] = None - first_name: Optional[str] = None - last_name: Optional[str] = None - email: Optional[str] = None @property def is_authenticated(self) -> bool: """Indicates whether the user has successfully logged in.""" return self.id is not None + @property + def is_anonymous(self) -> bool: + """Indicates whether the user is anonymous.""" + return isinstance(self, AnonymousAPIUser) + + def get_full_name(self) -> str | None: + """Generate the closest thing to a full name if the full name field is not set.""" + full_name = self.full_name or " ".join(filter(None, (self.first_name, self.last_name))) + if len(full_name) == 0: + return None + return full_name + + +@dataclass(kw_only=True, frozen=True) +class AuthenticatedAPIUser(APIUser): + """The model for a an authenticated user of the API.""" + + id: str + email: str + access_token: str = field(repr=False) + refresh_token: str | None = field(default=None, repr=False) + full_name: str | None = None + first_name: str | None = None + last_name: str | None = None + + +@dataclass(kw_only=True, frozen=True) +class AnonymousAPIUser(APIUser): + """The model for an anonymous user of the API.""" + + id: str + is_admin: bool = field(init=False, default=False) + + @property + def is_authenticated(self) -> bool: + """We cannot authenticate anonymous users, so this is by definition False.""" + return False + class ServiceAdminId(StrEnum): """Types of internal service admins.""" @@ -46,18 +77,22 @@ class ServiceAdminId(StrEnum): secrets_rotation = "secrets_rotation" -@dataclass(kw_only=True) +@dataclass(kw_only=True, frozen=True) class InternalServiceAdmin(APIUser): """Used to gain complete admin access by internal code components when performing tasks not started by users.""" id: ServiceAdminId = ServiceAdminId.migrations - is_admin: bool = field(default=True, init=False) - access_token: Optional[str] = field(repr=False, default=None, init=False) - full_name: 
Optional[str] = field(default=None, init=False) - first_name: Optional[str] = field(default=None, init=False) - last_name: Optional[str] = field(default=None, init=False) - email: Optional[str] = field(default=None, init=False) - is_authenticated: bool = field(default=True, init=False) + access_token: str = field(repr=False, default="internal-service-admin", init=False) + full_name: str | None = field(default=None, init=False) + first_name: str | None = field(default=None, init=False) + last_name: str | None = field(default=None, init=False) + email: str | None = field(default=None, init=False) + is_admin: bool = field(init=False, default=True) + + @property + def is_authenticated(self) -> bool: + """Internal admin users are always authenticated.""" + return True class GitlabAccessLevel(Enum): @@ -164,3 +199,16 @@ def __true_div__(self, other: "Slug") -> str: message=f"A path can be constructed only from 2 slugs, but the 'divisor' is of type {type(other)}" ) return self.value + "/" + other.value + + +AnyAPIUser = TypeVar("AnyAPIUser", bound=APIUser, covariant=True) + + +class Authenticator(Protocol[AnyAPIUser]): + """Interface for authenticating users.""" + + token_field: str + + async def authenticate(self, access_token: str, request: Request) -> AnyAPIUser: + """Validates the user credentials (i.e. we can say that the user is a valid Renku user).""" + ... diff --git a/components/renku_data_services/crc/db.py b/components/renku_data_services/crc/db.py index a3055a03b..4b42b3edd 100644 --- a/components/renku_data_services/crc/db.py +++ b/components/renku_data_services/crc/db.py @@ -7,16 +7,15 @@ """ from asyncio import gather -from collections.abc import Awaitable, Callable, Collection, Sequence +from collections.abc import Callable, Collection, Coroutine, Sequence from dataclasses import dataclass, field from functools import wraps from typing import Any, Concatenate, Optional, ParamSpec, TypeVar, cast -from sqlalchemy import NullPool, create_engine, delete, select +from sqlalchemy import NullPool, create_engine, delete, false, select, true from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy.orm import Session, selectinload, sessionmaker from sqlalchemy.sql import Select, and_, not_, or_ -from sqlalchemy.sql.expression import false, true import renku_data_services.base_models as base_models from renku_data_services import errors @@ -116,7 +115,9 @@ def _classes_user_access_control( _T = TypeVar("_T") -def _only_admins(f: Callable[Concatenate[Any, _P], Awaitable[_T]]) -> Callable[Concatenate[Any, _P], Awaitable[_T]]: +def _only_admins( + f: Callable[Concatenate[Any, _P], Coroutine[Any, Any, _T]], +) -> Callable[Concatenate[Any, _P], Coroutine[Any, Any, _T]]: """Decorator that errors out if the user is not an admin. It expects the APIUser model to be a named parameter in the decorated function or @@ -183,6 +184,21 @@ async def get_resource_pools( output.append(rp.dump(quota)) return output + async def get_default_resource_class(self) -> models.ResourceClass: + """Get the default resource class in the default resource pool.""" + async with self.session_maker() as session: + stmt = ( + select(schemas.ResourceClassORM) + .where(schemas.ResourceClassORM.default == true()) + .where(schemas.ResourceClassORM.resource_pool.has(schemas.ResourcePoolORM.default == true())) + ) + res = await session.scalar(stmt) + if res is None: + raise errors.ProgrammingError( + message="Could not find the default class from the default resource pool, but this has to exist." 
+ ) + return res.dump() + async def filter_resource_pools( self, api_user: base_models.APIUser, @@ -277,6 +293,13 @@ async def get_classes( orms = res.scalars().all() return [orm.dump() for orm in orms] + async def get_resource_class(self, api_user: base_models.APIUser, id: int) -> models.ResourceClass: + """Get a specific resource class by its ID.""" + classes = await self.get_classes(api_user, id) + if len(classes) == 0: + raise errors.MissingResourceError(message=f"The resource class with ID {id} cannot be found", quiet=True) + return classes[0] + @_only_admins async def insert_resource_class( self, diff --git a/components/renku_data_services/data_connectors/__init__.py b/components/renku_data_services/data_connectors/__init__.py new file mode 100644 index 000000000..9a7285ee0 --- /dev/null +++ b/components/renku_data_services/data_connectors/__init__.py @@ -0,0 +1 @@ +"""Data connectors module.""" diff --git a/components/renku_data_services/data_connectors/api.spec.yaml b/components/renku_data_services/data_connectors/api.spec.yaml new file mode 100644 index 000000000..dd7b0633c --- /dev/null +++ b/components/renku_data_services/data_connectors/api.spec.yaml @@ -0,0 +1,745 @@ +openapi: 3.0.2 +info: + title: Renku Data Services API + description: | + This service is the main backend for Renku. It provides information about users, projects, + cloud storage, access to compute resources and many other things. + version: v1 +servers: + - url: /api/data + - url: /ui-server/api/data +paths: + /data_connectors: + get: + summary: Get all data connectors + parameters: + - in: query + description: query parameters + name: params + style: form + explode: true + schema: + $ref: "#/components/schemas/DataConnectorsGetQuery" + responses: + "200": + description: List of data connectors + content: + "application/json": + schema: + $ref: "#/components/schemas/DataConnectorsList" + headers: + page: + description: The index of the current page (starting at 1). + required: true + schema: + type: integer + per-page: + description: The number of items per page. + required: true + schema: + type: integer + total: + description: The total number of items. + required: true + schema: + type: integer + total-pages: + description: The total number of pages. 
+ required: true + schema: + type: integer + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + post: + summary: Create a new data connector + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorPost" + responses: + "201": + description: The data connector was created + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnector" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + /data_connectors/{data_connector_id}: + parameters: + - in: path + name: data_connector_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the data connector + get: + summary: Get data connector details + responses: + "200": + description: The data connector + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnector" + "404": + description: The data connector does not exist + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + patch: + summary: Update specific fields of an existing data connector + parameters: + - $ref: "#/components/parameters/If-Match" + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorPatch" + responses: + "200": + description: The patched data connector + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnector" + "404": + description: The data connector does not exist + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + delete: + summary: Remove a data connector + responses: + "204": + description: The data connector was removed or did not exist in the first place + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + /namespaces/{namespace}/data_connectors/{slug}: + parameters: + - in: path + name: namespace + required: true + schema: + type: string + - in: path + name: slug + required: true + schema: + type: string + get: + summary: Get a data connector by namespace and project slug + responses: + "200": + description: The data connector + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnector" + "404": + description: The data connector does not exist + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + /data_connectors/{data_connector_id}/project_links: + parameters: + - in: path + name: data_connector_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the data connector + get: + summary: Get all links from a given data connector to projects + responses: + "200": + description: List of data connector to project links + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorToProjectLinksList" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + post: + summary: Create a new link from a data connector to a project + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorToProjectLinkPost" + responses: + "201": + description: The data connector was connected to a project + content: + application/json: + schema: + $ref: 
"#/components/schemas/DataConnectorToProjectLink" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + /data_connectors/{data_connector_id}/project_links/{link_id}: + parameters: + - in: path + name: data_connector_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the data connector + - in: path + name: link_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the link between a data connector and a project + delete: + summary: Remove a link from a data connector to a project + responses: + "204": + description: The data connector was removed or did not exist in the first place + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + /data_connectors/{data_connector_id}/secrets: + parameters: + - in: path + name: data_connector_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the data connector + get: + summary: Get all saved secrets for a data connector + responses: + "200": + description: The saved storage secrets + content: + "application/json": + schema: + $ref: "#/components/schemas/DataConnectorSecretsList" + "404": + description: Storage was not found + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorResponse" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + patch: + summary: Save secrets for a data connector + description: New secrets will be added and existing secrets will have their value updated. Using `null` as a value will remove the corresponding secret. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorSecretPatchList" + responses: + "201": + description: The secrets for cloud storage were saved + content: + "application/json": + schema: + $ref: "#/components/schemas/DataConnectorSecretsList" + default: + $ref: "#/components/responses/Error" + tags: + - data connectors + delete: + summary: Remove all saved secrets for a data connector + responses: + "204": + description: The secrets were removed or did not exist in the first place or the storage doesn't exist + default: + $ref: "#/components/responses/Error" + tags: + - data connectors +components: + schemas: + DataConnectorsList: + description: A list of data connectors + type: array + items: + $ref: "#/components/schemas/DataConnector" + DataConnector: + description: | + A data connector for Renku 2.0 for mounting remote data storage + type: object + additionalProperties: false + properties: + id: + $ref: "#/components/schemas/Ulid" + name: + $ref: "#/components/schemas/DataConnectorName" + namespace: + $ref: "#/components/schemas/Slug" + slug: + $ref: "#/components/schemas/Slug" + storage: + $ref: "#/components/schemas/CloudStorageCore" + secrets: + type: array + items: + $ref: "#/components/schemas/DataConnectorSecret" + creation_date: + $ref: "#/components/schemas/CreationDate" + created_by: + $ref: "#/components/schemas/UserId" + visibility: + $ref: "#/components/schemas/Visibility" + description: + $ref: "#/components/schemas/Description" + etag: + $ref: "#/components/schemas/ETag" + keywords: + $ref: "#/components/schemas/KeywordsList" + required: + - id + - name + - namespace + - slug + - storage + - creation_date + - created_by + - visibility + - etag + DataConnectorPost: + description: | + A data connector to be created in Renku 2.0 + type: object + additionalProperties: false + properties: + name: + $ref: 
"#/components/schemas/DataConnectorName" + namespace: + $ref: "#/components/schemas/Slug" + slug: + $ref: "#/components/schemas/Slug" + storage: + oneOf: + - $ref: "#/components/schemas/CloudStorageCorePost" + - $ref: "#/components/schemas/CloudStorageUrlV2" + visibility: + $ref: "#/components/schemas/Visibility" # Visibility is ``private`` if not passed at this point + default: "private" + description: + $ref: "#/components/schemas/Description" + keywords: + $ref: "#/components/schemas/KeywordsList" + required: + - name + - namespace + - storage + DataConnectorPatch: + description: | + Patch of a data connector + type: object + additionalProperties: false + properties: + name: + $ref: "#/components/schemas/DataConnectorName" + namespace: + $ref: "#/components/schemas/Slug" + slug: + $ref: "#/components/schemas/Slug" + storage: + $ref: "#/components/schemas/CloudStorageCorePatch" + visibility: + $ref: "#/components/schemas/Visibility" + description: + $ref: "#/components/schemas/Description" + keywords: + $ref: "#/components/schemas/KeywordsList" + CloudStorageCore: + description: Represents the configuration used to mount remote data storage + type: object + additionalProperties: false + properties: + storage_type: + $ref: "#/components/schemas/StorageType" + configuration: + $ref: "#/components/schemas/RCloneConfig" + source_path: + $ref: "#/components/schemas/SourcePath" + target_path: + $ref: "#/components/schemas/TargetPath" + readonly: + $ref: "#/components/schemas/StorageReadOnly" + sensitive_fields: + type: array + items: + $ref: "#/components/schemas/RCloneOption" + required: + - storage_type + - configuration + - source_path + - target_path + - readonly + - sensitive_fields + CloudStorageCorePost: + type: object + additionalProperties: false + properties: + storage_type: + $ref: "#/components/schemas/StorageType" + configuration: + $ref: "#/components/schemas/RCloneConfig" + source_path: + $ref: "#/components/schemas/SourcePath" + target_path: + $ref: "#/components/schemas/TargetPath" + readonly: + $ref: "#/components/schemas/StorageReadOnly" + default: true + required: + - configuration + - source_path + - target_path + CloudStorageCorePatch: + type: object + additionalProperties: false + properties: + storage_type: + $ref: "#/components/schemas/StorageType" + configuration: + $ref: "#/components/schemas/RCloneConfig" + source_path: + $ref: "#/components/schemas/SourcePath" + target_path: + $ref: "#/components/schemas/TargetPath" + readonly: + $ref: "#/components/schemas/StorageReadOnly" + RCloneConfig: + type: object + description: Dictionary of rclone key:value pairs (based on schema from '/storage_schema') + additionalProperties: + oneOf: + - type: integer + - type: string + nullable: true + - type: boolean + - type: object + CloudStorageUrlV2: + type: object + properties: + storage_url: + type: string + target_path: + $ref: "#/components/schemas/TargetPath" + readonly: + $ref: "#/components/schemas/StorageReadOnly" + default: true + required: + - storage_url + - target_path + example: + storage_url: s3://giab + DataConnectorToProjectLinksList: + description: A list of links from a data connector to a project + type: array + items: + $ref: "#/components/schemas/DataConnectorToProjectLink" + DataConnectorToProjectLink: + description: A link from a data connector to a project in Renku 2.0 + type: object + additionalProperties: false + properties: + id: + $ref: "#/components/schemas/Ulid" + data_connector_id: + $ref: "#/components/schemas/Ulid" + project_id: + $ref: 
"#/components/schemas/Ulid" + creation_date: + $ref: "#/components/schemas/CreationDate" + created_by: + $ref: "#/components/schemas/UserId" + required: + - id + - data_connector_id + - project_id + - creation_date + - created_by + DataConnectorToProjectLinkPost: + description: A link to be created from a data connector to a project in Renku 2.0 + type: object + additionalProperties: false + properties: + project_id: + $ref: "#/components/schemas/Ulid" + required: + - project_id + DataConnectorSecretsList: + description: A list of data connectors + type: array + items: + $ref: "#/components/schemas/DataConnectorSecret" + DataConnectorSecret: + description: Information about a credential saved for a data connector + type: object + properties: + name: + $ref: "#/components/schemas/DataConnectorName" + secret_id: + $ref: "#/components/schemas/Ulid" + required: + - name + - secret_id + DataConnectorSecretPatchList: + description: List of secrets to be saved for a data connector + type: array + items: + $ref: "#/components/schemas/DataConnectorSecretPatch" + DataConnectorSecretPatch: + description: Information about a credential to save for a data connector + properties: + name: + $ref: "#/components/schemas/DataConnectorName" + value: + $ref: "#/components/schemas/SecretValueNullable" + required: + - name + - value + SecretValueNullable: + description: Secret value that can be any text + type: string + minLength: 1 + maxLength: 5000 + nullable: true + RCloneOption: + type: object + description: Single field on an RClone storage, like "remote" or "access_key_id" + properties: + name: + type: string + description: name of the option + help: + type: string + description: help text for the option + provider: + type: string + description: The cloud provider the option is for (See 'provider' RCloneOption in the schema for potential values) + example: AWS + default: + oneOf: + - type: number + - type: string + - type: boolean + - type: object + - type: array + description: default value for the option + default_str: + type: string + description: string representation of the default value + examples: + description: "These list potential values for this option, like an enum. With `exclusive: true`, only a value from the list is allowed." + type: array + items: + type: object + properties: + value: + type: string + description: a potential value for the option (think enum) + help: + type: string + description: help text for the value + provider: + type: string + description: The provider this value is applicable for. Empty if valid for all providers. + required: + type: boolean + description: whether the option is required or not + ispassword: + type: boolean + description: whether the field is a password (use **** for display) + sensitive: + type: boolean + description: whether the value is sensitive (not stored in the service). Do not send this in requests to the service. + advanced: + type: boolean + description: whether this is an advanced config option (probably don't show these to users) + exclusive: + type: boolean + description: if true, only values from 'examples' can be used + datatype: + type: string + description: data type of option value. RClone has more options but they map to the ones listed here. 
+ enum: ["int", "bool", "string", "Time"] + Ulid: + description: ULID identifier + type: string + minLength: 26 + maxLength: 26 + pattern: "^[0-7][0-9A-HJKMNP-TV-Z]{25}$" # This is case-insensitive + Slug: + description: A command-line/url friendly name for a namespace + type: string + minLength: 1 + maxLength: 99 + # Slug regex rules + # from https://docs.gitlab.com/ee/user/reserved_names.html#limitations-on-usernames-project-and-group-names + # - cannot end in .git + # - cannot end in .atom + # - cannot contain any combination of two or more consecutive -._ + # - has to start with letter or number + pattern: '^(?!.*\.git$|.*\.atom$|.*[\-._][\-._].*)[a-zA-Z0-9][a-zA-Z0-9\-_.]*$' + example: "a-slug-example" + CreationDate: + description: The date and time the resource was created (in UTC and ISO-8601 format) + type: string + format: date-time + example: "2023-11-01T17:32:28Z" + UserId: + type: string + description: Keycloak user ID + example: f74a228b-1790-4276-af5f-25c2424e9b0c + pattern: "^[A-Za-z0-9]{1}[A-Za-z0-9-]+$" + Visibility: + description: Project's visibility levels + type: string + enum: + - private + - public + Description: + description: A description for the resource + type: string + maxLength: 500 + KeywordsList: + description: Project keywords + type: array + items: + $ref: "#/components/schemas/Keyword" + minItems: 0 + example: + - "project" + - "keywords" + Keyword: + description: A single keyword + type: string + minLength: 1 + maxLength: 99 + pattern: '^[A-Za-z0-9\s\-_.]*$' + DataConnectorName: + description: Renku data connector name + type: string + minLength: 1 + maxLength: 99 + example: "My Remote Data :)" + SourcePath: + description: the source path to mount, usually starts with bucket/container name + type: string + example: bucket/my/storage/folder/ + TargetPath: + description: the target path relative to the working directory where the storage should be mounted + type: string + example: my/project/folder + StorageType: + description: same as rclone prefix/ rclone config type. Ignored in requests, but returned in responses for convenience. + type: string + readOnly: true + StorageReadOnly: + description: Whether this storage should be mounted readonly or not + type: boolean + default: true + ETag: + type: string + description: Entity Tag + example: "9EE498F9D565D0C41E511377425F32F3" + DataConnectorsGetQuery: + description: Query params for data connectors get request + allOf: + - $ref: "#/components/schemas/PaginationRequest" + - properties: + namespace: + description: A namespace, used as a filter. 
+ type: string + default: "" + PaginationRequest: + type: object + additionalProperties: false + properties: + page: + description: Result's page number starting from 1 + type: integer + minimum: 1 + default: 1 + per_page: + description: The number of results per page + type: integer + minimum: 1 + maximum: 100 + default: 20 + ErrorResponse: + type: object + properties: + error: + type: object + properties: + code: + type: integer + minimum: 0 + exclusiveMinimum: true + example: 1404 + detail: + type: string + example: "A more detailed optional message showing what the problem was" + message: + type: string + example: "Something went wrong - please try again later" + required: ["code", "message"] + required: ["error"] + + responses: + Error: + description: The schema for all 4xx and 5xx responses + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorResponse" + + parameters: + If-Match: + in: header + name: If-Match + description: If-Match header, for avoiding mid-air collisions + required: true + schema: + $ref: "#/components/schemas/ETag" + + securitySchemes: + oidc: + type: openIdConnect + openIdConnectUrl: /auth/realms/Renku/.well-known/openid-configuration +security: + - oidc: + - openid diff --git a/components/renku_data_services/data_connectors/apispec.py b/components/renku_data_services/data_connectors/apispec.py new file mode 100644 index 000000000..d72c0208e --- /dev/null +++ b/components/renku_data_services/data_connectors/apispec.py @@ -0,0 +1,451 @@ +# generated by datamodel-codegen: +# filename: api.spec.yaml +# timestamp: 2024-09-24T11:47:00+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from pydantic import ConfigDict, Field, RootModel +from renku_data_services.data_connectors.apispec_base import BaseAPISpec + + +class Example(BaseAPISpec): + value: Optional[str] = Field( + None, description="a potential value for the option (think enum)" + ) + help: Optional[str] = Field(None, description="help text for the value") + provider: Optional[str] = Field( + None, + description="The provider this value is applicable for. Empty if valid for all providers.", + ) + + +class Datatype(Enum): + int = "int" + bool = "bool" + string = "string" + Time = "Time" + + +class RCloneOption(BaseAPISpec): + name: Optional[str] = Field(None, description="name of the option") + help: Optional[str] = Field(None, description="help text for the option") + provider: Optional[str] = Field( + None, + description="The cloud provider the option is for (See 'provider' RCloneOption in the schema for potential values)", + example="AWS", + ) + default: Optional[Union[float, str, bool, Dict[str, Any], List]] = Field( + None, description="default value for the option" + ) + default_str: Optional[str] = Field( + None, description="string representation of the default value" + ) + examples: Optional[List[Example]] = Field( + None, + description="These list potential values for this option, like an enum. With `exclusive: true`, only a value from the list is allowed.", + ) + required: Optional[bool] = Field( + None, description="whether the option is required or not" + ) + ispassword: Optional[bool] = Field( + None, description="whether the field is a password (use **** for display)" + ) + sensitive: Optional[bool] = Field( + None, + description="whether the value is sensitive (not stored in the service). 
Do not send this in requests to the service.", + ) + advanced: Optional[bool] = Field( + None, + description="whether this is an advanced config option (probably don't show these to users)", + ) + exclusive: Optional[bool] = Field( + None, description="if true, only values from 'examples' can be used" + ) + datatype: Optional[Datatype] = Field( + None, + description="data type of option value. RClone has more options but they map to the ones listed here.", + ) + + +class Visibility(Enum): + private = "private" + public = "public" + + +class Keyword(RootModel[str]): + root: str = Field( + ..., + description="A single keyword", + max_length=99, + min_length=1, + pattern="^[A-Za-z0-9\\s\\-_.]*$", + ) + + +class PaginationRequest(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + page: int = Field(1, description="Result's page number starting from 1", ge=1) + per_page: int = Field( + 20, description="The number of results per page", ge=1, le=100 + ) + + +class Error(BaseAPISpec): + code: int = Field(..., example=1404, gt=0) + detail: Optional[str] = Field( + None, example="A more detailed optional message showing what the problem was" + ) + message: str = Field(..., example="Something went wrong - please try again later") + + +class ErrorResponse(BaseAPISpec): + error: Error + + +class CloudStorageCore(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + storage_type: str = Field( + ..., + description="same as rclone prefix/ rclone config type. Ignored in requests, but returned in responses for convenience.", + ) + configuration: Dict[str, Union[int, Optional[str], bool, Dict[str, Any]]] + source_path: str = Field( + ..., + description="the source path to mount, usually starts with bucket/container name", + example="bucket/my/storage/folder/", + ) + target_path: str = Field( + ..., + description="the target path relative to the working directory where the storage should be mounted", + example="my/project/folder", + ) + readonly: bool = Field( + ..., description="Whether this storage should be mounted readonly or not" + ) + sensitive_fields: List[RCloneOption] + + +class CloudStorageCorePost(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + storage_type: Optional[str] = Field( + None, + description="same as rclone prefix/ rclone config type. Ignored in requests, but returned in responses for convenience.", + ) + configuration: Dict[str, Union[int, Optional[str], bool, Dict[str, Any]]] + source_path: str = Field( + ..., + description="the source path to mount, usually starts with bucket/container name", + example="bucket/my/storage/folder/", + ) + target_path: str = Field( + ..., + description="the target path relative to the working directory where the storage should be mounted", + example="my/project/folder", + ) + readonly: bool = Field( + True, description="Whether this storage should be mounted readonly or not" + ) + + +class CloudStorageCorePatch(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + storage_type: Optional[str] = Field( + None, + description="same as rclone prefix/ rclone config type. 
Ignored in requests, but returned in responses for convenience.", + ) + configuration: Optional[ + Dict[str, Union[int, Optional[str], bool, Dict[str, Any]]] + ] = None + source_path: Optional[str] = Field( + None, + description="the source path to mount, usually starts with bucket/container name", + example="bucket/my/storage/folder/", + ) + target_path: Optional[str] = Field( + None, + description="the target path relative to the working directory where the storage should be mounted", + example="my/project/folder", + ) + readonly: Optional[bool] = Field( + None, description="Whether this storage should be mounted readonly or not" + ) + + +class CloudStorageUrlV2(BaseAPISpec): + storage_url: str + target_path: str = Field( + ..., + description="the target path relative to the working directory where the storage should be mounted", + example="my/project/folder", + ) + readonly: bool = Field( + True, description="Whether this storage should be mounted readonly or not" + ) + + +class DataConnectorToProjectLink(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + data_connector_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + project_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + creation_date: datetime = Field( + ..., + description="The date and time the resource was created (in UTC and ISO-8601 format)", + example="2023-11-01T17:32:28Z", + ) + created_by: str = Field( + ..., + description="Keycloak user ID", + example="f74a228b-1790-4276-af5f-25c2424e9b0c", + pattern="^[A-Za-z0-9]{1}[A-Za-z0-9-]+$", + ) + + +class DataConnectorToProjectLinkPost(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + project_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + + +class DataConnectorSecret(BaseAPISpec): + name: str = Field( + ..., + description="Renku data connector name", + example="My Remote Data :)", + max_length=99, + min_length=1, + ) + secret_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + + +class DataConnectorSecretPatch(BaseAPISpec): + name: str = Field( + ..., + description="Renku data connector name", + example="My Remote Data :)", + max_length=99, + min_length=1, + ) + value: Optional[str] = Field( + ..., + description="Secret value that can be any text", + max_length=5000, + min_length=1, + ) + + +class DataConnectorsGetQuery(PaginationRequest): + namespace: str = Field("", description="A namespace, used as a filter.") + + +class DataConnectorsGetParametersQuery(BaseAPISpec): + params: Optional[DataConnectorsGetQuery] = None + + +class DataConnector(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + name: str = Field( + ..., + description="Renku data connector name", + example="My Remote Data :)", + max_length=99, + min_length=1, + ) + namespace: str = Field( + ..., + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + 
pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + slug: str = Field( + ..., + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + storage: CloudStorageCore + secrets: Optional[List[DataConnectorSecret]] = None + creation_date: datetime = Field( + ..., + description="The date and time the resource was created (in UTC and ISO-8601 format)", + example="2023-11-01T17:32:28Z", + ) + created_by: str = Field( + ..., + description="Keycloak user ID", + example="f74a228b-1790-4276-af5f-25c2424e9b0c", + pattern="^[A-Za-z0-9]{1}[A-Za-z0-9-]+$", + ) + visibility: Visibility + description: Optional[str] = Field( + None, description="A description for the resource", max_length=500 + ) + etag: str = Field( + ..., description="Entity Tag", example="9EE498F9D565D0C41E511377425F32F3" + ) + keywords: Optional[List[Keyword]] = Field( + None, + description="Project keywords", + example=["project", "keywords"], + min_length=0, + ) + + +class DataConnectorPost(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + name: str = Field( + ..., + description="Renku data connector name", + example="My Remote Data :)", + max_length=99, + min_length=1, + ) + namespace: str = Field( + ..., + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + slug: Optional[str] = Field( + None, + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + storage: Union[CloudStorageCorePost, CloudStorageUrlV2] + visibility: Visibility = Visibility.private + description: Optional[str] = Field( + None, description="A description for the resource", max_length=500 + ) + keywords: Optional[List[Keyword]] = Field( + None, + description="Project keywords", + example=["project", "keywords"], + min_length=0, + ) + + +class DataConnectorPatch(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + name: Optional[str] = Field( + None, + description="Renku data connector name", + example="My Remote Data :)", + max_length=99, + min_length=1, + ) + namespace: Optional[str] = Field( + None, + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + slug: Optional[str] = Field( + None, + description="A command-line/url friendly name for a namespace", + example="a-slug-example", + max_length=99, + min_length=1, + pattern="^(?!.*\\.git$|.*\\.atom$|.*[\\-._][\\-._].*)[a-zA-Z0-9][a-zA-Z0-9\\-_.]*$", + ) + storage: Optional[CloudStorageCorePatch] = None + visibility: Optional[Visibility] = None + description: Optional[str] = Field( + None, description="A description for the resource", max_length=500 + ) + keywords: Optional[List[Keyword]] = Field( + None, + description="Project keywords", + example=["project", "keywords"], + min_length=0, + ) + + +class DataConnectorToProjectLinksList(RootModel[List[DataConnectorToProjectLink]]): + root: List[DataConnectorToProjectLink] = Field( + ..., description="A list of links from a data connector to a project" + ) + 
+ +class DataConnectorSecretsList(RootModel[List[DataConnectorSecret]]): + root: List[DataConnectorSecret] = Field( + ..., description="A list of data connectors" + ) + + +class DataConnectorSecretPatchList(RootModel[List[DataConnectorSecretPatch]]): + root: List[DataConnectorSecretPatch] = Field( + ..., description="List of secrets to be saved for a data connector" + ) + + +class DataConnectorsList(RootModel[List[DataConnector]]): + root: List[DataConnector] = Field(..., description="A list of data connectors") diff --git a/components/renku_data_services/data_connectors/apispec_base.py b/components/renku_data_services/data_connectors/apispec_base.py new file mode 100644 index 000000000..476c07927 --- /dev/null +++ b/components/renku_data_services/data_connectors/apispec_base.py @@ -0,0 +1,22 @@ +"""Base models for API specifications.""" + +from pydantic import BaseModel, field_validator +from ulid import ULID + + +class BaseAPISpec(BaseModel): + """Base API specification.""" + + class Config: + """Enables orm mode for pydantic.""" + + from_attributes = True + # NOTE: By default the pydantic library does not use python for regex but a rust crate + # this rust crate does not support lookahead regex syntax but we need it in this component + regex_engine = "python-re" + + @field_validator("id", mode="before", check_fields=False) + @classmethod + def serialize_id(cls, id: str | ULID) -> str: + """Custom serializer that can handle ULIDs.""" + return str(id) diff --git a/components/renku_data_services/data_connectors/blueprints.py b/components/renku_data_services/data_connectors/blueprints.py new file mode 100644 index 000000000..869950e4b --- /dev/null +++ b/components/renku_data_services/data_connectors/blueprints.py @@ -0,0 +1,358 @@ +"""Data connectors blueprint.""" + +from dataclasses import dataclass +from typing import Any + +from sanic import Request +from sanic.response import HTTPResponse, JSONResponse +from sanic_ext import validate +from ulid import ULID + +from renku_data_services import base_models +from renku_data_services.base_api.auth import ( + authenticate, + only_authenticated, +) +from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint +from renku_data_services.base_api.etag import extract_if_none_match, if_match_required +from renku_data_services.base_api.misc import validate_body_root_model, validate_query +from renku_data_services.base_api.pagination import PaginationRequest, paginate +from renku_data_services.base_models.validation import validate_and_dump, validated_json +from renku_data_services.data_connectors import apispec, models +from renku_data_services.data_connectors.core import ( + dump_storage_with_sensitive_fields, + validate_data_connector_patch, + validate_data_connector_secrets_patch, + validate_unsaved_data_connector, +) +from renku_data_services.data_connectors.db import DataConnectorProjectLinkRepository, DataConnectorRepository +from renku_data_services.storage.rclone import RCloneValidator + + +@dataclass(kw_only=True) +class DataConnectorsBP(CustomBlueprint): + """Handlers for manipulating data connectors.""" + + data_connector_repo: DataConnectorRepository + data_connector_to_project_link_repo: DataConnectorProjectLinkRepository + authenticator: base_models.Authenticator + + def get_all(self) -> BlueprintFactoryResponse: + """List data connectors.""" + + @authenticate(self.authenticator) + @validate_query(query=apispec.DataConnectorsGetQuery) + @paginate + async def _get_all( + _: Request, + user: 
base_models.APIUser,
+            pagination: PaginationRequest,
+            query: apispec.DataConnectorsGetQuery,
+            validator: RCloneValidator,
+        ) -> tuple[list[dict[str, Any]], int]:
+            data_connectors, total_num = await self.data_connector_repo.get_data_connectors(
+                user=user,
+                pagination=pagination,
+                namespace=query.namespace,
+            )
+            return [
+                validate_and_dump(
+                    apispec.DataConnector,
+                    self._dump_data_connector(dc, validator=validator),
+                )
+                for dc in data_connectors
+            ], total_num
+
+        return "/data_connectors", ["GET"], _get_all
+
+    def post(self) -> BlueprintFactoryResponse:
+        """Create a new data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        @validate(json=apispec.DataConnectorPost)
+        async def _post(
+            _: Request, user: base_models.APIUser, body: apispec.DataConnectorPost, validator: RCloneValidator
+        ) -> JSONResponse:
+            data_connector = validate_unsaved_data_connector(body, validator=validator)
+            result = await self.data_connector_repo.insert_data_connector(user=user, data_connector=data_connector)
+            return validated_json(
+                apispec.DataConnector,
+                self._dump_data_connector(result, validator=validator),
+                status=201,
+            )
+
+        return "/data_connectors", ["POST"], _post
+
+    def get_one(self) -> BlueprintFactoryResponse:
+        """Get a specific data connector."""
+
+        @authenticate(self.authenticator)
+        @extract_if_none_match
+        async def _get_one(
+            _: Request, user: base_models.APIUser, data_connector_id: ULID, etag: str | None, validator: RCloneValidator
+        ) -> HTTPResponse:
+            data_connector = await self.data_connector_repo.get_data_connector(
+                user=user, data_connector_id=data_connector_id
+            )
+
+            if data_connector.etag == etag:
+                return HTTPResponse(status=304)
+
+            headers = {"ETag": data_connector.etag}
+            return validated_json(
+                apispec.DataConnector,
+                self._dump_data_connector(data_connector, validator=validator),
+                headers=headers,
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>", ["GET"], _get_one
+
+    def get_one_by_slug(self) -> BlueprintFactoryResponse:
+        """Get a specific data connector by namespace/entity slug."""
+
+        @authenticate(self.authenticator)
+        @extract_if_none_match
+        async def _get_one_by_slug(
+            _: Request,
+            user: base_models.APIUser,
+            namespace: str,
+            slug: str,
+            etag: str | None,
+            validator: RCloneValidator,
+        ) -> HTTPResponse:
+            data_connector = await self.data_connector_repo.get_data_connector_by_slug(
+                user=user, namespace=namespace, slug=slug
+            )
+
+            if data_connector.etag == etag:
+                return HTTPResponse(status=304)
+
+            headers = {"ETag": data_connector.etag}
+            return validated_json(
+                apispec.DataConnector,
+                self._dump_data_connector(data_connector, validator=validator),
+                headers=headers,
+            )
+
+        return "/namespaces/<namespace>/data_connectors/<slug>", ["GET"], _get_one_by_slug
+
+    def patch(self) -> BlueprintFactoryResponse:
+        """Partially update a data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        @if_match_required
+        @validate(json=apispec.DataConnectorPatch)
+        async def _patch(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+            body: apispec.DataConnectorPatch,
+            etag: str,
+            validator: RCloneValidator,
+        ) -> JSONResponse:
+            existing_dc = await self.data_connector_repo.get_data_connector(
+                user=user, data_connector_id=data_connector_id
+            )
+            dc_patch = validate_data_connector_patch(existing_dc, body, validator=validator)
+            data_connector_update = await self.data_connector_repo.update_data_connector(
+                user=user, data_connector_id=data_connector_id, patch=dc_patch, etag=etag
+            )
+
+            return validated_json(
+                apispec.DataConnector,
+                self._dump_data_connector(data_connector_update.new, validator=validator),
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>", ["PATCH"], _patch
+
+    def delete(self) -> BlueprintFactoryResponse:
+        """Delete a data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        async def _delete(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+        ) -> HTTPResponse:
+            await self.data_connector_repo.delete_data_connector(user=user, data_connector_id=data_connector_id)
+            return HTTPResponse(status=204)
+
+        return "/data_connectors/<data_connector_id:ulid>", ["DELETE"], _delete
+
+    def get_all_project_links(self) -> BlueprintFactoryResponse:
+        """List all links from a given data connector to projects."""
+
+        @authenticate(self.authenticator)
+        async def _get_all_project_links(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+        ) -> JSONResponse:
+            links = await self.data_connector_to_project_link_repo.get_links_from(
+                user=user, data_connector_id=data_connector_id
+            )
+            return validated_json(
+                apispec.DataConnectorToProjectLinksList,
+                [self._dump_data_connector_to_project_link(link) for link in links],
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>/project_links", ["GET"], _get_all_project_links
+
+    def post_project_link(self) -> BlueprintFactoryResponse:
+        """Create a new link from a data connector to a project."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        @validate(json=apispec.DataConnectorToProjectLinkPost)
+        async def _post_project_link(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+            body: apispec.DataConnectorToProjectLinkPost,
+        ) -> JSONResponse:
+            unsaved_link = models.UnsavedDataConnectorToProjectLink(
+                data_connector_id=data_connector_id,
+                project_id=ULID.from_str(body.project_id),
+            )
+            link = await self.data_connector_to_project_link_repo.insert_link(user=user, link=unsaved_link)
+            return validated_json(
+                apispec.DataConnectorToProjectLink, self._dump_data_connector_to_project_link(link), status=201
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>/project_links", ["POST"], _post_project_link
+
+    def delete_project_link(self) -> BlueprintFactoryResponse:
+        """Delete a link from a data connector to a project."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        async def _delete_project_link(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+            link_id: ULID,
+        ) -> HTTPResponse:
+            await self.data_connector_to_project_link_repo.delete_link(
+                user=user, data_connector_id=data_connector_id, link_id=link_id
+            )
+            return HTTPResponse(status=204)
+
+        return (
+            "/data_connectors/<data_connector_id:ulid>/project_links/<link_id:ulid>",
+            ["DELETE"],
+            _delete_project_link,
+        )
+
+    def get_all_data_connectors_links_to_project(self) -> BlueprintFactoryResponse:
+        """List all links from data connectors to a given project."""
+
+        @authenticate(self.authenticator)
+        async def _get_all_data_connectors_links_to_project(
+            _: Request,
+            user: base_models.APIUser,
+            project_id: ULID,
+        ) -> JSONResponse:
+            links = await self.data_connector_to_project_link_repo.get_links_to(user=user, project_id=project_id)
+            return validated_json(
+                apispec.DataConnectorToProjectLinksList,
+                [self._dump_data_connector_to_project_link(link) for link in links],
+            )
+
+        return "/projects/<project_id:ulid>/data_connector_links", ["GET"], _get_all_data_connectors_links_to_project
+
+    def get_secrets(self) -> BlueprintFactoryResponse:
+        """List all saved secrets for a data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        async def _get_secrets(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+        ) -> JSONResponse:
+            secrets = await self.data_connector_repo.get_data_connector_secrets(
+                user=user, data_connector_id=data_connector_id
+            )
+            return validated_json(
+                apispec.DataConnectorSecretsList, [self._dump_data_connector_secret(secret) for secret in secrets]
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>/secrets", ["GET"], _get_secrets
+
+    def patch_secrets(self) -> BlueprintFactoryResponse:
+        """Create, update or delete saved secrets for a data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        @validate_body_root_model(json=apispec.DataConnectorSecretPatchList)
+        async def _patch_secrets(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+            body: apispec.DataConnectorSecretPatchList,
+        ) -> JSONResponse:
+            unsaved_secrets = validate_data_connector_secrets_patch(put=body)
+            secrets = await self.data_connector_repo.patch_data_connector_secrets(
+                user=user, data_connector_id=data_connector_id, secrets=unsaved_secrets
+            )
+            return validated_json(
+                apispec.DataConnectorSecretsList, [self._dump_data_connector_secret(secret) for secret in secrets]
+            )
+
+        return "/data_connectors/<data_connector_id:ulid>/secrets", ["PATCH"], _patch_secrets
+
+    def delete_secrets(self) -> BlueprintFactoryResponse:
+        """Delete all saved secrets for a data connector."""
+
+        @authenticate(self.authenticator)
+        @only_authenticated
+        async def _delete_secrets(
+            _: Request,
+            user: base_models.APIUser,
+            data_connector_id: ULID,
+        ) -> HTTPResponse:
+            await self.data_connector_repo.delete_data_connector_secrets(user=user, data_connector_id=data_connector_id)
+            return HTTPResponse(status=204)
+
+        return "/data_connectors/<data_connector_id:ulid>/secrets", ["DELETE"], _delete_secrets
+
+    @staticmethod
+    def _dump_data_connector(data_connector: models.DataConnector, validator: RCloneValidator) -> dict[str, Any]:
+        """Dumps a data connector for API responses."""
+        storage = dump_storage_with_sensitive_fields(data_connector.storage, validator=validator)
+        return dict(
+            id=str(data_connector.id),
+            name=data_connector.name,
+            namespace=data_connector.namespace.slug,
+            slug=data_connector.slug,
+            storage=storage,
+            # secrets=,
+            creation_date=data_connector.creation_date,
+            created_by=data_connector.created_by,
+            visibility=data_connector.visibility.value,
+            description=data_connector.description,
+            etag=data_connector.etag,
+            keywords=data_connector.keywords or [],
+        )
+
+    @staticmethod
+    def _dump_data_connector_to_project_link(link: models.DataConnectorToProjectLink) -> dict[str, Any]:
+        """Dumps a link from a data connector to a project for API responses."""
+        return dict(
+            id=str(link.id),
+            data_connector_id=str(link.data_connector_id),
+            project_id=str(link.project_id),
+            creation_date=link.creation_date,
+            created_by=link.created_by,
+        )
+
+    @staticmethod
+    def _dump_data_connector_secret(secret: models.DataConnectorSecret) -> dict[str, Any]:
+        """Dumps a data connector secret for API responses."""
+        return dict(
+            name=secret.name,
+            secret_id=str(secret.secret_id),
+        )
diff --git a/components/renku_data_services/data_connectors/core.py b/components/renku_data_services/data_connectors/core.py
new file mode 100644
index 000000000..248b1fb9f
--- /dev/null
+++ b/components/renku_data_services/data_connectors/core.py
@@ -0,0 +1,140 @@
+"""Business logic for data connectors."""
+
+from dataclasses import asdict
+from typing import Any
+
+from renku_data_services import base_models, errors
+from renku_data_services.authz.models import Visibility
+from
renku_data_services.data_connectors import apispec, models +from renku_data_services.storage import models as storage_models +from renku_data_services.storage.rclone import RCloneValidator + + +def dump_storage_with_sensitive_fields( + storage: models.CloudStorageCore, validator: RCloneValidator +) -> models.CloudStorageCoreWithSensitiveFields: + """Add sensitive fields to a storage configuration.""" + return models.CloudStorageCoreWithSensitiveFields( + sensitive_fields=list(validator.get_private_fields(storage.configuration)), **asdict(storage) + ) + + +def validate_unsaved_storage( + storage: apispec.CloudStorageCorePost | apispec.CloudStorageUrlV2, validator: RCloneValidator +) -> models.CloudStorageCore: + """Validate the storage configuration of an unsaved data connector.""" + + configuration: dict[str, Any] + source_path: str + + if isinstance(storage, apispec.CloudStorageUrlV2): + cloud_storage = storage_models.UnsavedCloudStorage.from_url( + project_id="FAKEPROJECTID", + name="fake-storage-name", + storage_url=storage.storage_url, + target_path=storage.target_path, + readonly=storage.readonly, + ) + configuration = cloud_storage.configuration.config + source_path = cloud_storage.source_path + else: + configuration = storage.configuration + source_path = storage.source_path + + validator.validate(configuration) + + return models.CloudStorageCore( + storage_type=configuration["type"], + configuration=configuration, + source_path=source_path, + target_path=storage.target_path, + readonly=storage.readonly, + ) + + +def validate_unsaved_data_connector( + body: apispec.DataConnectorPost, validator: RCloneValidator +) -> models.UnsavedDataConnector: + """Validate an unsaved data connector.""" + + keywords = [kw.root for kw in body.keywords] if body.keywords is not None else [] + storage = validate_unsaved_storage(body.storage, validator=validator) + + return models.UnsavedDataConnector( + name=body.name, + namespace=body.namespace, + slug=body.slug or base_models.Slug.from_name(body.name).value, + visibility=Visibility(body.visibility.value), + created_by="", + storage=storage, + description=body.description, + keywords=keywords, + ) + + +def validate_storage_patch( + storage: models.CloudStorageCore, patch: apispec.CloudStorageCorePatch, validator: RCloneValidator +) -> models.CloudStorageCorePatch: + """Validate the update to a data connector's storage.""" + + if patch.configuration is not None: + # we need to apply the patch to the existing storage to properly validate it + patch.configuration = {**storage.configuration, **patch.configuration} + dict_items = list(patch.configuration.items()) + for k, v in dict_items: + if v is None: + # delete fields that were unset + del patch.configuration[k] + validator.validate(patch.configuration) + + return models.CloudStorageCorePatch( + storage_type=patch.storage_type, + configuration=patch.configuration, + source_path=patch.source_path, + target_path=patch.target_path, + readonly=patch.readonly, + ) + + +def validate_data_connector_patch( + data_connector: models.DataConnector, + patch: apispec.DataConnectorPatch, + validator: RCloneValidator, +) -> models.DataConnectorPatch: + """Validate the update to a data connector.""" + + keywords = [kw.root for kw in patch.keywords] if patch.keywords is not None else None + storage = ( + validate_storage_patch(data_connector.storage, patch.storage, validator=validator) + if patch.storage is not None + else None + ) + + return models.DataConnectorPatch( + name=patch.name, + namespace=patch.namespace, + 
slug=patch.slug, + visibility=Visibility(patch.visibility.value) if patch.visibility is not None else None, + description=patch.description, + keywords=keywords, + storage=storage, + ) + + +def validate_data_connector_secrets_patch( + put: apispec.DataConnectorSecretPatchList, +) -> list[models.DataConnectorSecretPatch]: + """Validate the update to a data connector's secrets.""" + seen_names: set[str] = set() + for secret in put.root: + if secret.name in seen_names: + raise errors.ValidationError(message=f"Found duplicate name '{secret.name}' in the list of secrets.") + seen_names.add(secret.name) + + return [ + models.DataConnectorSecretPatch( + name=secret.name, + value=secret.value, + ) + for secret in put.root + ] diff --git a/components/renku_data_services/data_connectors/db.py b/components/renku_data_services/data_connectors/db.py new file mode 100644 index 000000000..423c5d629 --- /dev/null +++ b/components/renku_data_services/data_connectors/db.py @@ -0,0 +1,588 @@ +"""Adapters for data connectors database classes.""" + +from collections.abc import Callable +from typing import TypeVar + +from cryptography.hazmat.primitives.asymmetric import rsa +from sqlalchemy import Select, delete, func, select +from sqlalchemy.ext.asyncio import AsyncSession +from ulid import ULID + +from renku_data_services import base_models, errors +from renku_data_services.authz.authz import Authz, AuthzOperation, ResourceType +from renku_data_services.authz.models import Scope +from renku_data_services.base_api.pagination import PaginationRequest +from renku_data_services.data_connectors import apispec, models +from renku_data_services.data_connectors import orm as schemas +from renku_data_services.namespace import orm as ns_schemas +from renku_data_services.secrets import orm as secrets_schemas +from renku_data_services.secrets.core import encrypt_user_secret +from renku_data_services.secrets.models import SecretKind +from renku_data_services.users.db import UserRepo +from renku_data_services.utils.core import with_db_transaction + + +class DataConnectorRepository: + """Repository for data connectors.""" + + def __init__( + self, + session_maker: Callable[..., AsyncSession], + authz: Authz, + user_repo: UserRepo, + secret_service_public_key: rsa.RSAPublicKey, + ) -> None: + self.session_maker = session_maker + self.authz = authz + self.user_repo = user_repo + self.secret_service_public_key = secret_service_public_key + + async def get_data_connectors( + self, user: base_models.APIUser, pagination: PaginationRequest, namespace: str | None = None + ) -> tuple[list[models.DataConnector], int]: + """Get multiple data connectors from the database.""" + data_connector_ids = await self.authz.resources_with_permission( + user, user.id, ResourceType.data_connector, Scope.READ + ) + + async with self.session_maker() as session: + stmt = select(schemas.DataConnectorORM).where(schemas.DataConnectorORM.id.in_(data_connector_ids)) + if namespace: + stmt = _filter_by_namespace_slug(stmt, namespace) + stmt = stmt.limit(pagination.per_page).offset(pagination.offset) + stmt = stmt.order_by(schemas.DataConnectorORM.id.desc()) + stmt_count = ( + select(func.count()) + .select_from(schemas.DataConnectorORM) + .where(schemas.DataConnectorORM.id.in_(data_connector_ids)) + ) + if namespace: + stmt_count = _filter_by_namespace_slug(stmt_count, namespace) + results = await session.scalars(stmt), await session.scalar(stmt_count) + data_connectors = results[0].all() + total_elements = results[1] or 0 + return [dc.dump() for dc in 
data_connectors], total_elements
+
+    async def get_data_connector(
+        self,
+        user: base_models.APIUser,
+        data_connector_id: ULID,
+    ) -> models.DataConnector:
+        """Get one data connector from the database."""
+        not_found_msg = (
+            f"Data connector with id '{data_connector_id}' does not exist or you do not have access to it."
+        )
+
+        authorized = await self.authz.has_permission(user, ResourceType.data_connector, data_connector_id, Scope.READ)
+        if not authorized:
+            raise errors.MissingResourceError(message=not_found_msg)
+
+        async with self.session_maker() as session:
+            result = await session.scalars(
+                select(schemas.DataConnectorORM).where(schemas.DataConnectorORM.id == data_connector_id)
+            )
+            data_connector = result.one_or_none()
+            if data_connector is None:
+                raise errors.MissingResourceError(message=not_found_msg)
+            return data_connector.dump()
+
+    async def get_data_connector_by_slug(
+        self, user: base_models.APIUser, namespace: str, slug: str
+    ) -> models.DataConnector:
+        """Get one data connector from the database by slug."""
+        not_found_msg = (
+            f"Data connector with identifier '{namespace}/{slug}' does not exist or you do not have access to it."
+        )
+
+        async with self.session_maker() as session:
+            stmt = select(schemas.DataConnectorORM)
+            stmt = _filter_by_namespace_slug(stmt, namespace)
+            stmt = stmt.where(ns_schemas.EntitySlugORM.slug == slug.lower())
+            result = await session.scalars(stmt)
+            data_connector = result.one_or_none()
+            if data_connector is None:
+                raise errors.MissingResourceError(message=not_found_msg)
+
+            authorized = await self.authz.has_permission(
+                user=user,
+                resource_type=ResourceType.data_connector,
+                resource_id=data_connector.id,
+                scope=Scope.READ,
+            )
+            if not authorized:
+                raise errors.MissingResourceError(message=not_found_msg)
+
+            return data_connector.dump()
+
+    @with_db_transaction
+    @Authz.authz_change(AuthzOperation.create, ResourceType.data_connector)
+    async def insert_data_connector(
+        self,
+        user: base_models.APIUser,
+        data_connector: models.UnsavedDataConnector,
+        *,
+        session: AsyncSession | None = None,
+    ) -> models.DataConnector:
+        """Insert a new data connector entry."""
+        if not session:
+            raise errors.ProgrammingError(message="A database session is required.")
+        ns = await session.scalar(
+            select(ns_schemas.NamespaceORM).where(ns_schemas.NamespaceORM.slug == data_connector.namespace.lower())
+        )
+        if not ns:
+            raise errors.MissingResourceError(
+                message=f"The data connector cannot be created because the namespace {data_connector.namespace} does not exist."
+ ) + if not ns.group_id and not ns.user_id: + raise errors.ProgrammingError(message="Found a namespace that has no group or user associated with it.") + + if user.id is None: + raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") + + resource_type, resource_id = ( + (ResourceType.group, ns.group_id) if ns.group and ns.group_id else (ResourceType.user_namespace, ns.id) + ) + has_permission = await self.authz.has_permission(user, resource_type, resource_id, Scope.WRITE) + if not has_permission: + raise errors.ForbiddenError( + message=f"The data connector cannot be created because you do not have sufficient permissions with the namespace {data_connector.namespace}" # noqa: E501 + ) + + slug = data_connector.slug or base_models.Slug.from_name(data_connector.name).value + + existing_slug = await session.scalar( + select(ns_schemas.EntitySlugORM) + .where(ns_schemas.EntitySlugORM.namespace_id == ns.id) + .where(ns_schemas.EntitySlugORM.slug == slug) + ) + if existing_slug is not None: + raise errors.ConflictError(message=f"An entity with the slug '{ns.slug}/{slug}' already exists.") + + visibility_orm = ( + apispec.Visibility(data_connector.visibility) + if isinstance(data_connector.visibility, str) + else apispec.Visibility(data_connector.visibility.value) + ) + data_connector_orm = schemas.DataConnectorORM( + name=data_connector.name, + visibility=visibility_orm, + storage_type=data_connector.storage.storage_type, + configuration=data_connector.storage.configuration, + source_path=data_connector.storage.source_path, + target_path=data_connector.storage.target_path, + readonly=data_connector.storage.readonly, + created_by_id=user.id, + description=data_connector.description, + keywords=data_connector.keywords, + ) + data_connector_slug = ns_schemas.EntitySlugORM.create_data_connector_slug( + slug, data_connector_id=data_connector_orm.id, namespace_id=ns.id + ) + + session.add(data_connector_orm) + session.add(data_connector_slug) + await session.flush() + await session.refresh(data_connector_orm) + + return data_connector_orm.dump() + + @with_db_transaction + @Authz.authz_change(AuthzOperation.update, ResourceType.data_connector) + async def update_data_connector( + self, + user: base_models.APIUser, + data_connector_id: ULID, + patch: models.DataConnectorPatch, + etag: str, + *, + session: AsyncSession | None = None, + ) -> models.DataConnectorUpdate: + """Update a data connector entry.""" + not_found_msg = f"Data connector with id '{data_connector_id}' does not exist or you do not have access to it." 
+
+        if not session:
+            raise errors.ProgrammingError(message="A database session is required.")
+        result = await session.scalars(
+            select(schemas.DataConnectorORM).where(schemas.DataConnectorORM.id == data_connector_id)
+        )
+        data_connector = result.one_or_none()
+        if data_connector is None:
+            raise errors.MissingResourceError(message=not_found_msg)
+        old_data_connector = data_connector.dump()
+
+        required_scope = Scope.WRITE
+        if patch.visibility is not None and patch.visibility != old_data_connector.visibility:
+            # NOTE: changing the visibility requires the user to be owner which means they should have DELETE permission
+            required_scope = Scope.DELETE
+        if patch.namespace is not None and patch.namespace != old_data_connector.namespace.slug:
+            # NOTE: changing the namespace requires the user to be owner which means they should have DELETE permission # noqa E501
+            required_scope = Scope.DELETE
+        authorized = await self.authz.has_permission(
+            user, ResourceType.data_connector, data_connector_id, required_scope
+        )
+        if not authorized:
+            raise errors.MissingResourceError(message=not_found_msg)
+
+        current_etag = data_connector.dump().etag
+        if current_etag != etag:
+            raise errors.ConflictError(
+                message=f"Current ETag is {current_etag}, not {etag}."
+            )
+
+        # TODO: handle slug update
+        if patch.name is not None:
+            data_connector.name = patch.name
+        if patch.visibility is not None:
+            visibility_orm = (
+                apispec.Visibility(patch.visibility)
+                if isinstance(patch.visibility, str)
+                else apispec.Visibility(patch.visibility.value)
+            )
+            data_connector.visibility = visibility_orm
+        if patch.namespace is not None:
+            ns = await session.scalar(
+                select(ns_schemas.NamespaceORM).where(ns_schemas.NamespaceORM.slug == patch.namespace.lower())
+            )
+            if not ns:
+                raise errors.MissingResourceError(
+                    message=f"The namespace with slug {patch.namespace} does not exist."
+                )
+            if not ns.group_id and not ns.user_id:
+                raise errors.ProgrammingError(message="Found a namespace that has no group or user associated with it.")
+            resource_type, resource_id = (
+                (ResourceType.group, ns.group_id) if ns.group and ns.group_id else (ResourceType.user_namespace, ns.id)
+            )
+            has_permission = await self.authz.has_permission(user, resource_type, resource_id, Scope.WRITE)
+            if not has_permission:
+                raise errors.ForbiddenError(
+                    message=f"The data connector cannot be moved because you do not have sufficient permissions with the namespace {patch.namespace}."
# noqa: E501 + ) + data_connector.slug.namespace_id = ns.id + if patch.description is not None: + data_connector.description = patch.description if patch.description else None + if patch.keywords is not None: + data_connector.keywords = patch.keywords if patch.keywords else None + if patch.storage is not None: + if patch.storage.configuration is not None: + data_connector.configuration = patch.storage.configuration + data_connector.storage_type = data_connector.configuration["type"] + if patch.storage.source_path is not None: + data_connector.source_path = patch.storage.source_path + if patch.storage.target_path is not None: + data_connector.target_path = patch.storage.target_path + if patch.storage.readonly is not None: + data_connector.readonly = patch.storage.readonly + + await session.flush() + await session.refresh(data_connector) + + return models.DataConnectorUpdate( + old=old_data_connector, + new=data_connector.dump(), + ) + + @with_db_transaction + @Authz.authz_change(AuthzOperation.delete, ResourceType.data_connector) + async def delete_data_connector( + self, + user: base_models.APIUser, + data_connector_id: ULID, + *, + session: AsyncSession | None = None, + ) -> models.DataConnector | None: + """Delete a data connector.""" + if not session: + raise errors.ProgrammingError(message="A database session is required.") + authorized = await self.authz.has_permission(user, ResourceType.data_connector, data_connector_id, Scope.DELETE) + if not authorized: + raise errors.MissingResourceError( + message=f"Data connector with id '{data_connector_id}' does not exist or you do not have access to it." + ) + + result = await session.scalars( + select(schemas.DataConnectorORM).where(schemas.DataConnectorORM.id == data_connector_id) + ) + data_connector_orm = result.one_or_none() + if data_connector_orm is None: + return None + + data_connector = data_connector_orm.dump() + await session.delete(data_connector_orm) + return data_connector + + async def get_data_connector_secrets( + self, + user: base_models.APIUser, + data_connector_id: ULID, + ) -> list[models.DataConnectorSecret]: + """Get data connectors secrets from the database.""" + if user.id is None: + raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") + + async with self.session_maker() as session: + stmt = ( + select(schemas.DataConnectorSecretORM) + .where(schemas.DataConnectorSecretORM.user_id == user.id) + .where(schemas.DataConnectorSecretORM.data_connector_id == data_connector_id) + .where(schemas.DataConnectorSecretORM.secret_id == secrets_schemas.SecretORM.id) + .where(secrets_schemas.SecretORM.user_id == user.id) + ) + results = await session.scalars(stmt) + secrets = results.all() + + return [secret.dump() for secret in secrets] + + async def patch_data_connector_secrets( + self, user: base_models.APIUser, data_connector_id: ULID, secrets: list[models.DataConnectorSecretPatch] + ) -> list[models.DataConnectorSecret]: + """Create, update or remove data connector secrets.""" + if user.id is None: + raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") + + # NOTE: check that the user can access the data connector + await self.get_data_connector(user=user, data_connector_id=data_connector_id) + + secrets_as_dict = {s.name: s.value for s in secrets} + + async with self.session_maker() as session, session.begin(): + stmt = ( + select(schemas.DataConnectorSecretORM) + .where(schemas.DataConnectorSecretORM.user_id == user.id) + 
.where(schemas.DataConnectorSecretORM.data_connector_id == data_connector_id)
+                .where(schemas.DataConnectorSecretORM.secret_id == secrets_schemas.SecretORM.id)
+                .where(secrets_schemas.SecretORM.user_id == user.id)
+            )
+            result = await session.scalars(stmt)
+            existing_secrets = result.all()
+            existing_secrets_as_dict = {s.name: s for s in existing_secrets}
+
+            all_secrets = []
+
+            for name, value in secrets_as_dict.items():
+                if value is None:
+                    # Remove the secret
+                    data_connector_secret_orm = existing_secrets_as_dict.get(name)
+                    if data_connector_secret_orm is None:
+                        continue
+                    await session.delete(data_connector_secret_orm.secret)
+                    del existing_secrets_as_dict[name]
+                    continue
+
+                encrypted_value, encrypted_key = await encrypt_user_secret(
+                    user_repo=self.user_repo,
+                    requested_by=user,
+                    secret_service_public_key=self.secret_service_public_key,
+                    secret_value=value,
+                )
+
+                if data_connector_secret_orm := existing_secrets_as_dict.get(name):
+                    data_connector_secret_orm.secret.update(
+                        encrypted_value=encrypted_value, encrypted_key=encrypted_key
+                    )
+                else:
+                    secret_orm = secrets_schemas.SecretORM(
+                        name=f"{data_connector_id}-{name}",
+                        user_id=user.id,
+                        encrypted_value=encrypted_value,
+                        encrypted_key=encrypted_key,
+                        kind=SecretKind.storage,
+                    )
+                    data_connector_secret_orm = schemas.DataConnectorSecretORM(
+                        name=name,
+                        user_id=user.id,
+                        data_connector_id=data_connector_id,
+                        secret_id=secret_orm.id,
+                    )
+                    session.add(secret_orm)
+                    session.add(data_connector_secret_orm)
+
+                all_secrets.append(data_connector_secret_orm.dump())
+
+        return all_secrets
+
+    async def delete_data_connector_secrets(self, user: base_models.APIUser, data_connector_id: ULID) -> None:
+        """Delete data connector secrets."""
+        if user.id is None:
+            raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.")
+
+        async with self.session_maker() as session, session.begin():
+            stmt = (
+                delete(secrets_schemas.SecretORM)
+                .where(secrets_schemas.SecretORM.user_id == user.id)
+                .where(secrets_schemas.SecretORM.id == schemas.DataConnectorSecretORM.secret_id)
+                .where(schemas.DataConnectorSecretORM.data_connector_id == data_connector_id)
+            )
+            await session.execute(stmt)
+            stmt = (
+                delete(schemas.DataConnectorSecretORM)
+                .where(schemas.DataConnectorSecretORM.user_id == user.id)
+                .where(schemas.DataConnectorSecretORM.data_connector_id == data_connector_id)
+            )
+            await session.execute(stmt)
+
+
+class DataConnectorProjectLinkRepository:
+    """Repository for links from data connectors to projects."""
+
+    def __init__(
+        self,
+        session_maker: Callable[..., AsyncSession],
+        authz: Authz,
+    ) -> None:
+        self.session_maker = session_maker
+        self.authz = authz
+
+    async def get_links_from(
+        self, user: base_models.APIUser, data_connector_id: ULID
+    ) -> list[models.DataConnectorToProjectLink]:
+        """Get links from a given data connector."""
+        authorized = await self.authz.has_permission(user, ResourceType.data_connector, data_connector_id, Scope.READ)
+        if not authorized:
+            raise errors.MissingResourceError(
+                message=f"Data connector with id '{data_connector_id}' does not exist or you do not have access to it."
+            )
+
+        project_ids = await self.authz.resources_with_permission(user, user.id, ResourceType.project, Scope.READ)
+
+        async with self.session_maker() as session:
+            stmt = (
+                select(schemas.DataConnectorToProjectLinkORM)
+                .where(schemas.DataConnectorToProjectLinkORM.data_connector_id == data_connector_id)
+                .where(schemas.DataConnectorToProjectLinkORM.project_id.in_(project_ids))
+            )
+            result = await session.scalars(stmt)
+            links_orm = result.all()
+            return [link.dump() for link in links_orm]
+
+    async def get_links_to(
+        self, user: base_models.APIUser, project_id: ULID
+    ) -> list[models.DataConnectorToProjectLink]:
+        """Get links to a given project."""
+        authorized = await self.authz.has_permission(user, ResourceType.project, project_id, Scope.READ)
+        if not authorized:
+            raise errors.MissingResourceError(
+                message=f"Project with id '{project_id}' does not exist or you do not have access to it."
+            )
+
+        data_connector_ids = await self.authz.resources_with_permission(
+            user, user.id, ResourceType.data_connector, Scope.READ
+        )
+
+        async with self.session_maker() as session:
+            stmt = (
+                select(schemas.DataConnectorToProjectLinkORM)
+                .where(schemas.DataConnectorToProjectLinkORM.project_id == project_id)
+                .where(schemas.DataConnectorToProjectLinkORM.data_connector_id.in_(data_connector_ids))
+            )
+            result = await session.scalars(stmt)
+            links_orm = result.all()
+            return [link.dump() for link in links_orm]
+
+    @with_db_transaction
+    @Authz.authz_change(AuthzOperation.create_link, ResourceType.data_connector)
+    async def insert_link(
+        self,
+        user: base_models.APIUser,
+        link: models.UnsavedDataConnectorToProjectLink,
+        *,
+        session: AsyncSession | None = None,
+    ) -> models.DataConnectorToProjectLink:
+        """Insert a new link from a data connector to a project."""
+        if not session:
+            raise errors.ProgrammingError(message="A database session is required.")
+
+        if user.id is None:
+            raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.")
+
+        data_connector = (
+            await session.scalars(
+                select(schemas.DataConnectorORM).where(schemas.DataConnectorORM.id == link.data_connector_id)
+            )
+        ).one_or_none()
+        if data_connector is None:
+            raise errors.MissingResourceError(
+                message=f"Data connector with id '{link.data_connector_id}' does not exist or you do not have access to it." # noqa E501
+            )
+
+        project = (
+            await session.scalars(select(schemas.ProjectORM).where(schemas.ProjectORM.id == link.project_id))
+        ).one_or_none()
+        if project is None:
+            raise errors.MissingResourceError(
+                message=f"Project with id '{link.project_id}' does not exist or you do not have access to it."
+            )
+
+        existing_link = await session.scalar(
+            select(schemas.DataConnectorToProjectLinkORM)
+            .where(schemas.DataConnectorToProjectLinkORM.data_connector_id == link.data_connector_id)
+            .where(schemas.DataConnectorToProjectLinkORM.project_id == link.project_id)
+        )
+        if existing_link is not None:
+            raise errors.ConflictError(
+                message=f"A link from data connector {link.data_connector_id} to project {link.project_id} already exists."
# noqa E501 + ) + + link_orm = schemas.DataConnectorToProjectLinkORM( + data_connector_id=link.data_connector_id, + project_id=link.project_id, + created_by_id=user.id, + ) + + session.add(link_orm) + await session.flush() + await session.refresh(link_orm) + + return link_orm.dump() + + @with_db_transaction + @Authz.authz_change(AuthzOperation.delete_link, ResourceType.data_connector) + async def delete_link( + self, + user: base_models.APIUser, + data_connector_id: ULID, + link_id: ULID, + *, + session: AsyncSession | None = None, + ) -> models.DataConnectorToProjectLink | None: + """Delete a link from a data connector to a project.""" + if not session: + raise errors.ProgrammingError(message="A database session is required.") + + link_orm = ( + await session.scalars( + select(schemas.DataConnectorToProjectLinkORM) + .where(schemas.DataConnectorToProjectLinkORM.id == link_id) + .where(schemas.DataConnectorToProjectLinkORM.data_connector_id == data_connector_id) + ) + ).one_or_none() + if link_orm is None: + return None + + authorized_from = await self.authz.has_permission( + user, ResourceType.data_connector, data_connector_id, Scope.DELETE + ) + authorized_to = await self.authz.has_permission(user, ResourceType.project, link_orm.project_id, Scope.WRITE) + authorized = authorized_from or authorized_to + if not authorized: + raise errors.MissingResourceError( + message=f"Data connector to project link '{link_id}' does not exist or you do not have access to it." + ) + + link = link_orm.dump() + await session.delete(link_orm) + return link + + +_T = TypeVar("_T") + + +def _filter_by_namespace_slug(statement: Select[tuple[_T]], namespace: str) -> Select[tuple[_T]]: + """Filters a select query on data connectors to a given namespace.""" + return ( + statement.where(ns_schemas.NamespaceORM.slug == namespace.lower()) + .where(ns_schemas.EntitySlugORM.namespace_id == ns_schemas.NamespaceORM.id) + .where(schemas.DataConnectorORM.id == ns_schemas.EntitySlugORM.data_connector_id) + ) diff --git a/components/renku_data_services/data_connectors/models.py b/components/renku_data_services/data_connectors/models.py new file mode 100644 index 000000000..dc66dcf1e --- /dev/null +++ b/components/renku_data_services/data_connectors/models.py @@ -0,0 +1,136 @@ +"""Models for data connectors.""" + +from dataclasses import dataclass, field +from datetime import UTC, datetime +from typing import TYPE_CHECKING, Any + +from ulid import ULID + +from renku_data_services.authz.models import Visibility +from renku_data_services.namespace.models import Namespace +from renku_data_services.utils.etag import compute_etag_from_timestamp + +if TYPE_CHECKING: + from renku_data_services.storage.rclone import RCloneOption + + +@dataclass(frozen=True, eq=True, kw_only=True) +class CloudStorageCore: + """Remote storage configuration model.""" + + storage_type: str + configuration: dict[str, Any] + source_path: str + target_path: str + readonly: bool + + +@dataclass(frozen=True, eq=True, kw_only=True) +class BaseDataConnector: + """Base data connector model.""" + + name: str + slug: str + visibility: Visibility + created_by: str + creation_date: datetime = field(default_factory=lambda: datetime.now(UTC)) + updated_at: datetime | None = field(default=None) + description: str | None = None + keywords: list[str] | None = None + storage: CloudStorageCore + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnector(BaseDataConnector): + """Data connector model.""" + + id: ULID + namespace: Namespace + updated_at: datetime 
+ + @property + def etag(self) -> str: + """Entity tag value for this data connector object.""" + return compute_etag_from_timestamp(self.updated_at, include_quotes=True) + + +@dataclass(frozen=True, eq=True, kw_only=True) +class UnsavedDataConnector(BaseDataConnector): + """A data connector that hasn't been stored in the database.""" + + namespace: str + + +@dataclass(frozen=True, eq=True, kw_only=True) +class CloudStorageCorePatch: + """Model for changes requested on a remote storage configuration.""" + + storage_type: str | None + configuration: dict[str, Any] | None + source_path: str | None + target_path: str | None + readonly: bool | None + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnectorPatch: + """Model for changes requested on a data connector.""" + + name: str | None + namespace: str | None + slug: str | None + visibility: Visibility | None + description: str | None + keywords: list[str] | None + storage: CloudStorageCorePatch | None + + +@dataclass(frozen=True, eq=True, kw_only=True) +class CloudStorageCoreWithSensitiveFields(CloudStorageCore): + """Remote storage configuration model with sensitive fields.""" + + sensitive_fields: list["RCloneOption"] + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnectorUpdate: + """Information about the update of a data connector.""" + + old: DataConnector + new: DataConnector + + +@dataclass(frozen=True, eq=True, kw_only=True) +class UnsavedDataConnectorToProjectLink: + """Base model for a link from a data connector to a project.""" + + data_connector_id: ULID + project_id: ULID + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnectorToProjectLink(UnsavedDataConnectorToProjectLink): + """A link from a data connector to a project.""" + + id: ULID + created_by: str + creation_date: datetime + updated_at: datetime + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnectorSecret: + """Data connector secret model.""" + + name: str + user_id: str + data_connector_id: ULID + secret_id: ULID + + +@dataclass(frozen=True, eq=True, kw_only=True) +class DataConnectorSecretPatch: + """Secret to be saved for a data connector.""" + + name: str + value: str | None diff --git a/components/renku_data_services/data_connectors/orm.py b/components/renku_data_services/data_connectors/orm.py new file mode 100644 index 000000000..447168d34 --- /dev/null +++ b/components/renku_data_services/data_connectors/orm.py @@ -0,0 +1,198 @@ +"""SQLAlchemy schemas for the data connectors database.""" + +from datetime import datetime +from typing import TYPE_CHECKING, Any + +from sqlalchemy import JSON, Boolean, DateTime, ForeignKey, MetaData, String, func +from sqlalchemy.dialects.postgresql import ARRAY, JSONB +from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column, relationship +from sqlalchemy.schema import Index, UniqueConstraint +from ulid import ULID + +from renku_data_services.authz import models as authz_models +from renku_data_services.base_orm.registry import COMMON_ORM_REGISTRY +from renku_data_services.data_connectors import models +from renku_data_services.data_connectors.apispec import Visibility +from renku_data_services.project.orm import ProjectORM +from renku_data_services.secrets.orm import SecretORM +from renku_data_services.users.orm import UserORM +from renku_data_services.utils.sqlalchemy import ULIDType + +if TYPE_CHECKING: + from renku_data_services.namespace.orm import EntitySlugORM + +JSONVariant = JSON().with_variant(JSONB(), "postgresql") + + +class 
BaseORM(MappedAsDataclass, DeclarativeBase): + """Base class for all ORM classes.""" + + metadata = MetaData(schema="storage") + registry = COMMON_ORM_REGISTRY + + +class DataConnectorORM(BaseORM): + """A data connector for Renku 2.0.""" + + __tablename__ = "data_connectors" + + id: Mapped[ULID] = mapped_column("id", ULIDType, primary_key=True, default_factory=lambda: str(ULID()), init=False) + """ID of this data connector.""" + + name: Mapped[str] = mapped_column("name", String(99)) + """Name of the data connector.""" + + visibility: Mapped[Visibility] + """Visibility of the data connector.""" + + storage_type: Mapped[str] = mapped_column("storage_type", String(20)) + """Type of storage (e.g. s3), read-only based on 'configuration'.""" + + configuration: Mapped[dict[str, Any]] = mapped_column("configuration", JSONVariant) + """RClone configuration dict.""" + + source_path: Mapped[str] = mapped_column("source_path", String()) + """Source path to mount from (e.g. bucket/folder for s3).""" + + target_path: Mapped[str] = mapped_column("target_path", String()) + """Target folder in the repository to mount to.""" + + created_by_id: Mapped[str] = mapped_column(ForeignKey(UserORM.keycloak_id), index=True, nullable=False) + """User ID of the creator of the data connector.""" + + description: Mapped[str | None] = mapped_column("description", String(500)) + """Human-readable description of the data connector.""" + + keywords: Mapped[list[str] | None] = mapped_column("keywords", ARRAY(String(99)), nullable=True) + """Keywords for the data connector.""" + + slug: Mapped["EntitySlugORM"] = relationship( + lazy="joined", init=False, repr=False, viewonly=True, back_populates="data_connector" + ) + """Slug of the data connector.""" + + readonly: Mapped[bool] = mapped_column("readonly", Boolean(), default=True) + """Whether this storage should be mounted readonly or not """ + + creation_date: Mapped[datetime] = mapped_column( + "creation_date", DateTime(timezone=True), default=func.now(), nullable=False + ) + updated_at: Mapped[datetime] = mapped_column( + "updated_at", + DateTime(timezone=True), + default=None, + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + def dump(self) -> models.DataConnector: + """Create a data connector model from the DataConnectorORM.""" + return models.DataConnector( + id=self.id, + name=self.name, + slug=self.slug.slug, + namespace=self.slug.namespace.dump(), + visibility=self._dump_visibility(), + created_by=self.created_by_id, + creation_date=self.creation_date, + updated_at=self.updated_at, + storage=self._dump_storage(), + description=self.description, + keywords=self.keywords, + ) + + def _dump_visibility(self) -> authz_models.Visibility: + return ( + authz_models.Visibility.PUBLIC if self.visibility == Visibility.public else authz_models.Visibility.PRIVATE + ) + + def _dump_storage(self) -> models.CloudStorageCore: + return models.CloudStorageCore( + storage_type=self.storage_type, + configuration=self.configuration, + source_path=self.source_path, + target_path=self.target_path, + readonly=self.readonly, + ) + + +class DataConnectorToProjectLinkORM(BaseORM): + """A link from a data connector to a project in Renku 2.0.""" + + __tablename__ = "data_connector_to_project_links" + __table_args__ = ( + UniqueConstraint( + "data_connector_id", + "project_id", + name="_unique_data_connector_id_project_id_uc", + ), + ) + + id: Mapped[ULID] = mapped_column("id", ULIDType, primary_key=True, default_factory=lambda: str(ULID()), init=False) + """ID of this 
data connector to project link.""" + + data_connector_id: Mapped[ULID] = mapped_column( + ForeignKey(DataConnectorORM.id, ondelete="CASCADE"), index=True, nullable=False + ) + """ID of the data connector.""" + + project_id: Mapped[ULID] = mapped_column(ForeignKey(ProjectORM.id, ondelete="CASCADE"), index=True, nullable=False) + """ID of the project.""" + + created_by_id: Mapped[str] = mapped_column( + ForeignKey(UserORM.keycloak_id, ondelete="CASCADE"), index=True, nullable=False + ) + """User ID of the creator of the data connector.""" + + creation_date: Mapped[datetime] = mapped_column( + "creation_date", DateTime(timezone=True), default=func.now(), nullable=False + ) + updated_at: Mapped[datetime] = mapped_column( + "updated_at", + DateTime(timezone=True), + default=None, + server_default=func.now(), + onupdate=func.now(), + nullable=False, + ) + + def dump(self) -> models.DataConnectorToProjectLink: + """Create a link model from the DataConnectorProjectLinkORM.""" + return models.DataConnectorToProjectLink( + id=self.id, + data_connector_id=self.data_connector_id, + project_id=self.project_id, + created_by=self.created_by_id, + creation_date=self.creation_date, + updated_at=self.updated_at, + ) + + +class DataConnectorSecretORM(BaseORM): + """Secrets for data connectors.""" + + __tablename__ = "data_connector_secrets" + __table_args__ = ( + Index("ix_storage_data_connector_secrets_user_id_data_connector_id", "user_id", "data_connector_id"), + ) + + user_id: Mapped[str] = mapped_column(ForeignKey(UserORM.keycloak_id, ondelete="CASCADE"), primary_key=True) + + data_connector_id: Mapped[ULID] = mapped_column( + ForeignKey(DataConnectorORM.id, ondelete="CASCADE"), primary_key=True + ) + + name: Mapped[str] = mapped_column("name", String(), primary_key=True) + + secret_id: Mapped[ULID] = mapped_column("secret_id", ForeignKey(SecretORM.id, ondelete="CASCADE")) + secret: Mapped[SecretORM] = relationship(init=False, repr=False, lazy="selectin") + + def dump(self) -> models.DataConnectorSecret: + """Create a data connector secret model from the DataConnectorSecretORM.""" + return models.DataConnectorSecret( + name=self.name, + user_id=self.user_id, + data_connector_id=self.data_connector_id, + secret_id=self.secret_id, + ) diff --git a/components/renku_data_services/migrations/env.py b/components/renku_data_services/migrations/env.py index 3f360a946..70370b961 100644 --- a/components/renku_data_services/migrations/env.py +++ b/components/renku_data_services/migrations/env.py @@ -5,6 +5,7 @@ from renku_data_services.authz.orm import BaseORM as authz from renku_data_services.connected_services.orm import BaseORM as connected_services from renku_data_services.crc.orm import BaseORM as crc +from renku_data_services.data_connectors.orm import BaseORM as data_connectors from renku_data_services.message_queue.orm import BaseORM as events from renku_data_services.migrations.utils import logging_config, run_migrations from renku_data_services.namespace.orm import BaseORM as namespaces @@ -23,6 +24,7 @@ authz.metadata, crc.metadata, connected_services.metadata, + data_connectors.metadata, events.metadata, namespaces.metadata, platform.metadata, diff --git a/components/renku_data_services/migrations/versions/1ef98b967767_add_command_and_args_to_environment.py b/components/renku_data_services/migrations/versions/1ef98b967767_add_command_and_args_to_environment.py new file mode 100644 index 000000000..f91f342ce --- /dev/null +++ 
b/components/renku_data_services/migrations/versions/1ef98b967767_add_command_and_args_to_environment.py @@ -0,0 +1,41 @@ +"""Add command and args to environment + +Revision ID: 1ef98b967767 +Revises: 584598f3b769 +Create Date: 2024-08-25 21:05:02.158021 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "1ef98b967767" +down_revision = "584598f3b769" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "environments", + sa.Column("args", sa.JSON().with_variant(postgresql.JSONB(astext_type=sa.Text()), "postgresql"), nullable=True), + schema="sessions", + ) + op.add_column( + "environments", + sa.Column( + "command", sa.JSON().with_variant(postgresql.JSONB(astext_type=sa.Text()), "postgresql"), nullable=True + ), + schema="sessions", + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_column("environments", "command", schema="sessions") + op.drop_column("environments", "args", schema="sessions") + # ### end Alembic commands ### diff --git a/components/renku_data_services/migrations/versions/3cf2adf9896b_add_data_connectors.py b/components/renku_data_services/migrations/versions/3cf2adf9896b_add_data_connectors.py new file mode 100644 index 000000000..d30c4be8e --- /dev/null +++ b/components/renku_data_services/migrations/versions/3cf2adf9896b_add_data_connectors.py @@ -0,0 +1,144 @@ +"""add data connectors + +Revision ID: 3cf2adf9896b +Revises: a11752a5afba +Create Date: 2024-09-05 14:20:47.006275 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "3cf2adf9896b" +down_revision = "a11752a5afba" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + visibility = postgresql.ENUM(name="visibility", create_type=False) + + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "data_connectors", + sa.Column("id", sa.String(length=26), nullable=False), + sa.Column("name", sa.String(length=99), nullable=False), + sa.Column("visibility", visibility, nullable=False), + sa.Column("storage_type", sa.String(length=20), nullable=False), + sa.Column( + "configuration", + sa.JSON().with_variant(postgresql.JSONB(astext_type=sa.Text()), "postgresql"), + nullable=False, + ), + sa.Column("source_path", sa.String(), nullable=False), + sa.Column("target_path", sa.String(), nullable=False), + sa.Column("created_by_id", sa.String(length=36), nullable=False), + sa.Column("description", sa.String(length=500), nullable=True), + sa.Column("keywords", postgresql.ARRAY(sa.String(length=99)), nullable=True), + sa.Column("readonly", sa.Boolean(), nullable=False), + sa.Column("creation_date", sa.DateTime(timezone=True), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.ForeignKeyConstraint( + ["created_by_id"], + ["users.users.keycloak_id"], + ), + sa.PrimaryKeyConstraint("id"), + schema="storage", + ) + op.create_index( + op.f("ix_storage_data_connectors_created_by_id"), + "data_connectors", + ["created_by_id"], + unique=False, + schema="storage", + ) + op.add_column("entity_slugs", sa.Column("data_connector_id", sa.String(length=26), nullable=True), schema="common") + op.alter_column("entity_slugs", "project_id", existing_type=sa.String(length=26), nullable=True, schema="common") + op.create_index( + op.f("ix_common_entity_slugs_data_connector_id"), + "entity_slugs", + ["data_connector_id"], + unique=False, + schema="common", + ) + op.create_foreign_key( + "entity_slugs_data_connector_id_fk", + "entity_slugs", + "data_connectors", + ["data_connector_id"], + ["id"], + source_schema="common", + referent_schema="storage", + ondelete="CASCADE", + ) + op.create_table( + "data_connector_secrets", + sa.Column("user_id", sa.String(length=36), nullable=False), + sa.Column("data_connector_id", sa.String(length=26), nullable=False), + sa.Column("name", sa.String(), nullable=False), + sa.Column("secret_id", sa.String(length=26), nullable=False), + sa.ForeignKeyConstraint(["data_connector_id"], ["storage.data_connectors.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["secret_id"], ["secrets.secrets.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["user_id"], ["users.users.keycloak_id"], ondelete="CASCADE"), + sa.PrimaryKeyConstraint("user_id", "data_connector_id", "name"), + schema="storage", + ) + op.create_index( + "ix_storage_data_connector_secrets_user_id_data_connector_id", + "data_connector_secrets", + ["user_id", "data_connector_id"], + unique=False, + schema="storage", + ) + op.drop_index( + "ix_storage_cloud_storage_secrets_user_id_storage_id", table_name="cloud_storage_secrets", schema="storage" + ) + op.drop_table("cloud_storage_secrets", schema="storage") + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "cloud_storage_secrets", + sa.Column("user_id", sa.String(length=36), autoincrement=False, nullable=False), + sa.Column("storage_id", sa.String(length=26), autoincrement=False, nullable=False), + sa.Column("name", sa.String(length=99), autoincrement=False, nullable=False), + sa.Column("secret_id", sa.String(length=26), autoincrement=False, nullable=False), + sa.ForeignKeyConstraint( + ["secret_id"], ["secrets.secrets.id"], name="cloud_storage_secrets_secret_id_fkey", ondelete="CASCADE" + ), + sa.ForeignKeyConstraint( + ["storage_id"], + ["storage.cloud_storage.storage_id"], + name="cloud_storage_secrets_storage_id_fkey", + ondelete="CASCADE", + ), + sa.ForeignKeyConstraint( + ["user_id"], ["users.users.keycloak_id"], name="cloud_storage_secrets_user_id_fkey", ondelete="CASCADE" + ), + sa.PrimaryKeyConstraint("user_id", "storage_id", "name", name="_unique_user_id_storage_id_name"), + schema="storage", + ) + op.create_index( + "ix_storage_cloud_storage_secrets_user_id_storage_id", + "cloud_storage_secrets", + ["user_id", "storage_id"], + unique=False, + schema="storage", + ) + op.drop_index( + "ix_storage_data_connector_secrets_user_id_data_connector_id", + table_name="data_connector_secrets", + schema="storage", + ) + op.drop_table("data_connector_secrets", schema="storage") + op.drop_constraint("entity_slugs_data_connector_id_fk", "entity_slugs", schema="common", type_="foreignkey") + op.drop_index(op.f("ix_common_entity_slugs_data_connector_id"), table_name="entity_slugs", schema="common") + op.alter_column("entity_slugs", "project_id", existing_type=sa.String(length=26), nullable=False, schema="common") + op.drop_column("entity_slugs", "data_connector_id", schema="common") + op.drop_index(op.f("ix_storage_data_connectors_created_by_id"), table_name="data_connectors", schema="storage") + op.drop_table("data_connectors", schema="storage") + # ### end Alembic commands ### diff --git a/components/renku_data_services/migrations/versions/5335b8548c79_add_authorization_for_data_connectors.py b/components/renku_data_services/migrations/versions/5335b8548c79_add_authorization_for_data_connectors.py new file mode 100644 index 000000000..4dbd9323e --- /dev/null +++ b/components/renku_data_services/migrations/versions/5335b8548c79_add_authorization_for_data_connectors.py @@ -0,0 +1,61 @@ +"""add authorization for data connectors + +Revision ID: 5335b8548c79 +Revises: 3cf2adf9896b +Create Date: 2024-09-12 13:11:11.087316 + +""" + +import logging + +import sqlalchemy as sa +from alembic import op + +from renku_data_services.authz.config import AuthzConfig +from renku_data_services.authz.schemas import generate_v4 + +# revision identifiers, used by Alembic. 
+revision = "5335b8548c79" +down_revision = "3cf2adf9896b" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + config = AuthzConfig.from_env() + client = config.authz_client() + connection = op.get_bind() + with connection.begin_nested() as tx: + op.execute(sa.text("LOCK TABLE projects.projects IN EXCLUSIVE MODE")) + stmt = ( + sa.select(sa.column("id", type_=sa.VARCHAR)) + .select_from(sa.table("projects", schema="projects")) + .where(sa.column("visibility") == sa.literal("public", type_=sa.Enum("visibility"))) + ) + project_ids = connection.scalars(stmt).all() + v4 = generate_v4(project_ids) + responses = v4.upgrade(client) + tx.commit() + logging.info( + f"Finished upgrading the Authz schema to version 4 in Alembic revision {revision}, response: {responses}" + ) + + +def downgrade() -> None: + config = AuthzConfig.from_env() + client = config.authz_client() + connection = op.get_bind() + with connection.begin_nested() as tx: + op.execute(sa.text("LOCK TABLE projects.projects IN EXCLUSIVE MODE")) + stmt = ( + sa.select(sa.column("id", type_=sa.VARCHAR)) + .select_from(sa.table("projects", schema="projects")) + .where(sa.column("visibility") == sa.literal("public", type_=sa.Enum("visibility"))) + ) + project_ids = connection.scalars(stmt).all() + v4 = generate_v4(project_ids) + responses = v4.downgrade(client) + tx.commit() + logging.info( + f"Finished downgrading the Authz schema from version 4 in Alembic revision {revision}, response: {responses}" + ) diff --git a/components/renku_data_services/migrations/versions/584598f3b769_expand_and_separate_environments_from_.py b/components/renku_data_services/migrations/versions/584598f3b769_expand_and_separate_environments_from_.py new file mode 100644 index 000000000..6973937d6 --- /dev/null +++ b/components/renku_data_services/migrations/versions/584598f3b769_expand_and_separate_environments_from_.py @@ -0,0 +1,109 @@ +"""expand and separate environments from session launchers + +Revision ID: 584598f3b769 +Revises: 9058bf0a1a12 +Create Date: 2024-08-12 14:25:24.292285 + +""" + +import sqlalchemy as sa +from alembic import op +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = "584598f3b769" +down_revision = "9058bf0a1a12" +branch_labels = None +depends_on = None + +default_url: str = "/lab" +working_dir: str = "/home/jovyan/work" +mount_dir: str = "/home/jovyan/work" +uid: int = 1000 +gid: int = 1000 +port: int = 8888 + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.execute("DELETE FROM sessions.launchers") + op.drop_column("launchers", "default_url", schema="sessions") + op.drop_column("launchers", "environment_kind", schema="sessions") + op.drop_column("launchers", "container_image", schema="sessions") + op.execute("DROP TYPE environmentkind CASCADE") + op.execute("CREATE TYPE environmentkind AS ENUM ('GLOBAL', 'CUSTOM')") + op.add_column("environments", sa.Column("port", sa.Integer(), nullable=True), schema="sessions") + op.add_column("environments", sa.Column("working_directory", sa.String(), nullable=True), schema="sessions") + op.add_column("environments", sa.Column("mount_directory", sa.String(), nullable=True), schema="sessions") + op.add_column("environments", sa.Column("uid", sa.Integer(), nullable=True), schema="sessions") + op.add_column("environments", sa.Column("gid", sa.Integer(), nullable=True), schema="sessions") + op.add_column( + "environments", + sa.Column("environment_kind", sa.Enum("GLOBAL", "CUSTOM", name="environmentkind"), nullable=True), + schema="sessions", + ) + op.execute(sa.text("UPDATE sessions.environments SET port = :port WHERE port is NULL").bindparams(port=port)) + op.execute( + sa.text( + "UPDATE sessions.environments SET working_directory = :working_dir WHERE working_directory is NULL" + ).bindparams(working_dir=working_dir) + ) + op.execute( + sa.text( + "UPDATE sessions.environments SET mount_directory = :mount_dir WHERE mount_directory is NULL" + ).bindparams(mount_dir=mount_dir) + ) + op.execute(sa.text("UPDATE sessions.environments SET uid = :uid WHERE uid is NULL").bindparams(uid=uid)) + op.execute(sa.text("UPDATE sessions.environments SET gid = :gid WHERE gid is NULL").bindparams(gid=gid)) + op.execute("UPDATE sessions.environments SET environment_kind = 'GLOBAL' WHERE environment_kind is NULL") + op.execute( + sa.text("UPDATE sessions.environments SET default_url = :default_url WHERE default_url is NULL").bindparams( + default_url=default_url + ) + ) + op.alter_column("environments", "port", nullable=False, schema="sessions") + op.alter_column("environments", "working_directory", nullable=False, schema="sessions") + op.alter_column("environments", "mount_directory", nullable=False, schema="sessions") + op.alter_column("environments", "uid", nullable=False, schema="sessions") + op.alter_column("environments", "gid", nullable=False, schema="sessions") + op.alter_column("environments", "environment_kind", nullable=False, schema="sessions") + op.alter_column( + "environments", "default_url", existing_type=sa.VARCHAR(length=200), nullable=False, schema="sessions" + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_column("environments", "environment_kind", schema="sessions") + op.drop_column("environments", "gid", schema="sessions") + op.drop_column("environments", "uid", schema="sessions") + op.drop_column("environments", "mount_directory", schema="sessions") + op.drop_column("environments", "working_directory", schema="sessions") + op.drop_column("environments", "port", schema="sessions") + op.execute("DROP TYPE environmentkind") + op.execute("CREATE TYPE environmentkind AS ENUM ('global_environment', 'container_image')") + op.add_column( + "launchers", + sa.Column("container_image", sa.VARCHAR(length=500), autoincrement=False, nullable=True), + schema="sessions", + ) + op.add_column( + "launchers", + sa.Column( + "environment_kind", + postgresql.ENUM("global_environment", "container_image", name="environmentkind"), + autoincrement=False, + nullable=False, + ), + schema="sessions", + ) + op.add_column( + "launchers", + sa.Column("default_url", sa.VARCHAR(length=200), autoincrement=False, nullable=True), + schema="sessions", + ) + op.alter_column( + "environments", "default_url", existing_type=sa.VARCHAR(length=200), nullable=True, schema="sessions" + ) + # ### end Alembic commands ### diff --git a/components/renku_data_services/migrations/versions/88af2fdd2cc7_add_links_from_data_connectors_to_.py b/components/renku_data_services/migrations/versions/88af2fdd2cc7_add_links_from_data_connectors_to_.py new file mode 100644 index 000000000..0bf5825c6 --- /dev/null +++ b/components/renku_data_services/migrations/versions/88af2fdd2cc7_add_links_from_data_connectors_to_.py @@ -0,0 +1,78 @@ +"""add links from data connectors to projects + +Revision ID: 88af2fdd2cc7 +Revises: 5335b8548c79 +Create Date: 2024-09-17 13:55:43.783482 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. +revision = "88af2fdd2cc7" +down_revision = "5335b8548c79" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + op.create_table( + "data_connector_to_project_links", + sa.Column("id", sa.String(length=26), nullable=False), + sa.Column("data_connector_id", sa.String(length=26), nullable=False), + sa.Column("project_id", sa.String(length=26), nullable=False), + sa.Column("created_by_id", sa.String(length=36), nullable=False), + sa.Column("creation_date", sa.DateTime(timezone=True), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False), + sa.ForeignKeyConstraint(["created_by_id"], ["users.users.keycloak_id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["data_connector_id"], ["storage.data_connectors.id"], ondelete="CASCADE"), + sa.ForeignKeyConstraint(["project_id"], ["projects.projects.id"], ondelete="CASCADE"), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("data_connector_id", "project_id", name="_unique_data_connector_id_project_id_uc"), + schema="storage", + ) + op.create_index( + op.f("ix_storage_data_connector_to_project_links_created_by_id"), + "data_connector_to_project_links", + ["created_by_id"], + unique=False, + schema="storage", + ) + op.create_index( + op.f("ix_storage_data_connector_to_project_links_data_connector_id"), + "data_connector_to_project_links", + ["data_connector_id"], + unique=False, + schema="storage", + ) + op.create_index( + op.f("ix_storage_data_connector_to_project_links_project_id"), + "data_connector_to_project_links", + ["project_id"], + unique=False, + schema="storage", + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.drop_index( + op.f("ix_storage_data_connector_to_project_links_project_id"), + table_name="data_connector_to_project_links", + schema="storage", + ) + op.drop_index( + op.f("ix_storage_data_connector_to_project_links_data_connector_id"), + table_name="data_connector_to_project_links", + schema="storage", + ) + op.drop_index( + op.f("ix_storage_data_connector_to_project_links_created_by_id"), + table_name="data_connector_to_project_links", + schema="storage", + ) + op.drop_table("data_connector_to_project_links", schema="storage") + # ### end Alembic commands ### diff --git a/components/renku_data_services/migrations/versions/a11752a5afba_migrate_to_entity_slugs.py b/components/renku_data_services/migrations/versions/a11752a5afba_migrate_to_entity_slugs.py new file mode 100644 index 000000000..d27446709 --- /dev/null +++ b/components/renku_data_services/migrations/versions/a11752a5afba_migrate_to_entity_slugs.py @@ -0,0 +1,118 @@ +"""migrate to entity slugs + +Revision ID: a11752a5afba +Revises: 9058bf0a1a12 +Create Date: 2024-09-03 11:18:46.025525 + +""" + +import sqlalchemy as sa +from alembic import op + +# revision identifiers, used by Alembic. 
+revision = "a11752a5afba" +down_revision = "9058bf0a1a12" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + connection = op.get_bind() + + op.execute("ALTER TABLE projects.project_slugs SET SCHEMA common") + op.rename_table("project_slugs", "entity_slugs", schema="common") + op.execute("ALTER INDEX common.project_slugs_unique_slugs RENAME TO entity_slugs_unique_slugs") + op.execute( + "ALTER INDEX common.ix_projects_project_slugs_namespace_id RENAME TO ix_common_entity_slugs_namespace_id" + ) + op.execute("ALTER INDEX common.ix_projects_project_slugs_project_id RENAME TO ix_common_entity_slugs_project_id") + op.execute("ALTER INDEX common.ix_projects_project_slugs_slug RENAME TO ix_common_entity_slugs_slug") + op.execute("ALTER SEQUENCE common.project_slugs_id_seq RENAME TO entity_slugs_id_seq") + op.drop_constraint("project_slugs_project_id_fk", "entity_slugs", schema="common", type_="foreignkey") + op.create_foreign_key( + "entity_slugs_project_id_fk", + "entity_slugs", + "projects", + ["project_id"], + ["id"], + source_schema="common", + referent_schema="projects", + ondelete="CASCADE", + ) + + op.execute("ALTER TABLE projects.project_slugs_old SET SCHEMA common") + op.rename_table("project_slugs_old", "entity_slugs_old", schema="common") + op.execute( + "ALTER INDEX common.ix_projects_project_slugs_old_created_at RENAME TO ix_common_entity_slugs_old_created_at" + ) + op.execute( + "ALTER INDEX common.ix_projects_project_slugs_old_latest_slug_id RENAME TO ix_common_entity_slugs_old_latest_slug_id" + ) + op.execute("ALTER INDEX common.ix_projects_project_slugs_old_slug RENAME TO ix_common_entity_slugs_old_slug") + op.execute("ALTER SEQUENCE common.project_slugs_old_id_seq RENAME TO entity_slugs_old_id_seq") + + tables = ["entity_slugs", "entity_slugs_old"] + inspector = sa.inspect(op.get_bind()) + found_sequences = inspector.get_sequence_names("common") + for table in tables: + seq = f"{table}_id_seq" + if seq not in found_sequences: + continue + last_id_stmt = sa.select(sa.func.max(sa.column("id", type_=sa.INT))).select_from( + sa.table(table, schema="common") + ) + last_id = connection.scalars(last_id_stmt).one_or_none() + if last_id is None or last_id <= 0: + continue + op.execute(sa.text(f"ALTER SEQUENCE common.{seq} RESTART WITH {last_id + 1}")) + + +def downgrade() -> None: + connection = op.get_bind() + + op.drop_constraint("entity_slugs_project_id_fk", "entity_slugs", schema="common", type_="foreignkey") + op.create_foreign_key( + "project_slugs_project_id_fk", + "entity_slugs", + "projects", + ["project_id"], + ["id"], + source_schema="common", + referent_schema="projects", + ondelete="CASCADE", + ) + op.execute("ALTER SEQUENCE common.entity_slugs_id_seq RENAME TO project_slugs_id_seq") + op.execute("ALTER INDEX common.ix_common_entity_slugs_slug RENAME TO ix_projects_project_slugs_slug") + op.execute("ALTER INDEX common.ix_common_entity_slugs_project_id RENAME TO ix_projects_project_slugs_project_id") + op.execute( + "ALTER INDEX common.ix_common_entity_slugs_namespace_id RENAME TO ix_projects_project_slugs_namespace_id" + ) + op.execute("ALTER INDEX common.entity_slugs_unique_slugs RENAME TO project_slugs_unique_slugs") + op.rename_table("entity_slugs", "project_slugs", schema="common") + op.execute("ALTER TABLE common.project_slugs SET SCHEMA projects") + + op.execute("ALTER SEQUENCE common.entity_slugs_old_id_seq RENAME TO project_slugs_old_id_seq") + op.execute("ALTER INDEX common.ix_common_entity_slugs_old_slug RENAME TO 
ix_projects_project_slugs_old_slug") + op.execute( + "ALTER INDEX common.ix_common_entity_slugs_old_latest_slug_id RENAME TO ix_projects_project_slugs_old_latest_slug_id" + ) + op.execute( + "ALTER INDEX common.ix_common_entity_slugs_old_created_at RENAME TO ix_projects_project_slugs_old_created_at" + ) + op.rename_table("entity_slugs_old", "project_slugs_old", schema="common") + op.execute("ALTER TABLE common.project_slugs_old SET SCHEMA projects") + + tables = ["project_slugs", "project_slugs_old"] + inspector = sa.inspect(op.get_bind()) + found_sequences = inspector.get_sequence_names("projects") + for table in tables: + seq = f"{table}_id_seq" + if seq not in found_sequences: + continue + last_id_stmt = sa.select(sa.func.max(sa.column("id", type_=sa.INT))).select_from( + sa.table(table, schema="projects") + ) + last_id = connection.scalars(last_id_stmt).one_or_none() + if last_id is None or last_id <= 0: + continue + op.execute(sa.text(f"ALTER SEQUENCE projects.{seq} RESTART WITH {last_id + 1}")) diff --git a/components/renku_data_services/namespace/orm.py b/components/renku_data_services/namespace/orm.py index abc85a74c..f8a69881c 100644 --- a/components/renku_data_services/namespace/orm.py +++ b/components/renku_data_services/namespace/orm.py @@ -3,14 +3,16 @@ from datetime import datetime from typing import Optional, Self, cast -from sqlalchemy import CheckConstraint, DateTime, MetaData, String, func +from sqlalchemy import CheckConstraint, DateTime, Index, MetaData, String, func from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column, relationship from sqlalchemy.schema import ForeignKey from ulid import ULID from renku_data_services.base_orm.registry import COMMON_ORM_REGISTRY +from renku_data_services.data_connectors.orm import DataConnectorORM from renku_data_services.errors import errors from renku_data_services.namespace import models +from renku_data_services.project.orm import ProjectORM from renku_data_services.users.models import UserInfo from renku_data_services.users.orm import UserORM from renku_data_services.utils.sqlalchemy import ULIDType @@ -164,7 +166,8 @@ def dump(self) -> models.Namespace: ) name = ( - f"{self.latest_slug.user.first_name} {self.latest_slug.user.last_name}" + f"{self.latest_slug.user.first_name} { + self.latest_slug.user.last_name}" if self.latest_slug.user.first_name and self.latest_slug.user.last_name else self.latest_slug.user.first_name or self.latest_slug.user.last_name ) @@ -177,3 +180,74 @@ def dump(self) -> models.Namespace: underlying_resource_id=self.latest_slug.user_id, name=name, ) + + +class EntitySlugORM(BaseORM): + """Entity slugs.""" + + __tablename__ = "entity_slugs" + __table_args__ = ( + Index("entity_slugs_unique_slugs", "namespace_id", "slug", unique=True), + CheckConstraint( + "CAST (project_id IS NOT NULL AS int) + CAST (data_connector_id IS NOT NULL AS int) BETWEEN 0 AND 1", + name="either_project_id_or_data_connector_id_is_set", + ), + ) + + id: Mapped[int] = mapped_column(primary_key=True, init=False) + slug: Mapped[str] = mapped_column(String(99), index=True, nullable=False) + project_id: Mapped[ULID | None] = mapped_column( + ForeignKey(ProjectORM.id, ondelete="CASCADE", name="entity_slugs_project_id_fk"), index=True, nullable=True + ) + project: Mapped[ProjectORM | None] = relationship(lazy="joined", init=False, repr=False, back_populates="slug") + data_connector_id: Mapped[ULID | None] = mapped_column( + ForeignKey(DataConnectorORM.id, ondelete="CASCADE", 
name="entity_slugs_data_connector_id_fk"), + index=True, + nullable=True, + ) + data_connector: Mapped[DataConnectorORM | None] = relationship( + lazy="joined", init=False, repr=False, back_populates="slug" + ) + namespace_id: Mapped[ULID] = mapped_column( + ForeignKey(NamespaceORM.id, ondelete="CASCADE", name="entity_slugs_namespace_id_fk"), index=True + ) + namespace: Mapped[NamespaceORM] = relationship(lazy="joined", init=False, repr=False, viewonly=True) + + @classmethod + def create_project_slug(cls, slug: str, project_id: ULID, namespace_id: ULID) -> "EntitySlugORM": + """Create an entity slug for a project.""" + return cls( + slug=slug, + project_id=project_id, + data_connector_id=None, + namespace_id=namespace_id, + ) + + @classmethod + def create_data_connector_slug(cls, slug: str, data_connector_id: ULID, namespace_id: ULID) -> "EntitySlugORM": + """Create an entity slug for a data connector.""" + return cls( + slug=slug, + project_id=None, + data_connector_id=data_connector_id, + namespace_id=namespace_id, + ) + + +class EntitySlugOldORM(BaseORM): + """Entity slugs history.""" + + __tablename__ = "entity_slugs_old" + + id: Mapped[int] = mapped_column(primary_key=True, init=False) + slug: Mapped[str] = mapped_column(String(99), index=True, nullable=False) + created_at: Mapped[datetime] = mapped_column( + DateTime(timezone=True), nullable=False, index=True, init=False, server_default=func.now() + ) + latest_slug_id: Mapped[int] = mapped_column( + ForeignKey(EntitySlugORM.id, ondelete="CASCADE"), + nullable=False, + init=False, + index=True, + ) + latest_slug: Mapped[EntitySlugORM] = relationship(lazy="joined", repr=False, viewonly=True) diff --git a/components/renku_data_services/notebooks/api.spec.yaml b/components/renku_data_services/notebooks/api.spec.yaml index 702934783..6f8ac9774 100644 --- a/components/renku_data_services/notebooks/api.spec.yaml +++ b/components/renku_data_services/notebooks/api.spec.yaml @@ -35,7 +35,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" - description: 'The maximum number of (most recent) lines to return from the logs.' in: query name: max_lines @@ -152,7 +152,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" - description: | If true, delete immediately disregarding the grace period of the underlying JupyterServer resource. @@ -187,7 +187,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" responses: '200': content: @@ -211,7 +211,7 @@ paths: name: server_name required: true schema: - type: string + "$ref": "#/components/schemas/ServerName" requestBody: content: application/json: @@ -244,6 +244,159 @@ paths: description: The server exists but could not be successfully hibernated. 
tags: - notebooks + "/sessions": + post: + summary: Launch a new session + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SessionPostRequest" + responses: + "201": + description: The session was created + content: + application/json: + schema: + $ref: "#/components/schemas/SessionResponse" + "200": + description: The session already exists + content: + application/json: + schema: + $ref: "#/components/schemas/SessionResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions + get: + summary: Get a list of all sessions for a user + responses: + "200": + description: Information about the sessions + content: + application/json: + schema: + $ref: "#/components/schemas/SessionListResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions + "/sessions/{session_id}": + get: + summary: Get information about a specific session + parameters: + - description: The id of the session + in: path + name: session_id + required: true + schema: + type: string + responses: + "200": + description: Information about the session + content: + application/json: + schema: + $ref: "#/components/schemas/SessionResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions + delete: + parameters: + - description: The id of the session that should be deleted + in: path + name: session_id + required: true + schema: + type: string + summary: Fully remove a session + responses: + "204": + description: The session was deleted or it never existed in the first place + default: + $ref: "#/components/responses/Error" + tags: + - sessions + patch: + summary: Patch a session + parameters: + - description: The id of the session + in: path + name: session_id + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SessionPatchRequest" + responses: + "200": + description: The session was patched + content: + application/json: + schema: + $ref: "#/components/schemas/SessionResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions + "/sessions/{session_id}/logs": + get: + summary: Get all logs from a specific session + parameters: + - description: The id of the session + in: path + name: session_id + required: true + schema: + type: string + - description: The maximum number of most-recent lines to return for each container + in: query + name: max_lines + required: false + schema: + type: integer + default: 250 + responses: + "200": + description: The session logs + content: + application/json: + schema: + $ref: "#/components/schemas/SessionLogsResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions + "/sessions/images": + get: + summary: Check if a session image exists + parameters: + - description: The Docker image URL (tag included) that should be fetched. 
+ in: query + name: image_url + required: true + schema: + type: string + responses: + "200": + description: The docker image can be found + "404": + description: The docker image cannot be found or the user does not have permissions to access it + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + default: + $ref: "#/components/responses/Error" + tags: + - sessions components: schemas: BoolServerOptionsChoice: @@ -302,11 +455,8 @@ components: message: type: string example: "Something went wrong - please try again later" - required: - - "code" - - "message" - required: - - "error" + required: ["code", "message"] + required: ["error"] Generated: properties: enabled: @@ -325,7 +475,7 @@ components: repositories: type: array default: [] - items: + items: "$ref": "#/components/schemas/LaunchNotebookRequestRepository" cloudstorage: default: [] @@ -336,7 +486,7 @@ components: default: 1 type: integer resource_class_id: - default: + default: nullable: true type: integer environment_variables: @@ -352,7 +502,6 @@ components: required: - project_id - launcher_id - - image type: object LaunchNotebookRequestRepository: properties: @@ -387,7 +536,7 @@ components: default: {} type: object image: - default: + default: nullable: true type: string lfs_auto_fetch: @@ -396,13 +545,13 @@ components: namespace: type: string notebook: - default: + default: nullable: true type: string project: type: string resource_class_id: - default: + default: nullable: true type: integer serverOptions: @@ -413,7 +562,7 @@ components: user_secrets: allOf: - "$ref": "#/components/schemas/UserSecrets" - default: + default: nullable: true required: - commit_sha @@ -454,7 +603,7 @@ components: image: type: string name: - type: string + "$ref": "#/components/schemas/ServerName" resources: "$ref": "#/components/schemas/UserPodResources" started: @@ -508,7 +657,7 @@ components: properties: configuration: additionalProperties: {} - default: + default: nullable: true type: object readonly: @@ -517,7 +666,7 @@ components: source_path: type: string storage_id: - default: + default: nullable: true type: string target_path: @@ -649,8 +798,6 @@ components: "$ref": "#/components/schemas/ResourceRequests" usage: "$ref": "#/components/schemas/ResourceUsage" - required: - - requests type: object UserSecrets: properties: @@ -723,6 +870,172 @@ components: - renku.io/projectName - renku.io/repository type: object + SessionPostRequest: + properties: + launcher_id: + $ref: "#/components/schemas/Ulid" + disk_storage: + default: 1 + type: integer + description: The size of disk storage for the session, in gigabytes + resource_class_id: + default: + nullable: true + type: integer + cloudstorage: + $ref: "#/components/schemas/SessionCloudStoragePostList" + required: + - launcher_id + type: object + SessionResponse: + properties: + image: + type: string + name: + "$ref": "#/components/schemas/ServerName" + resources: + "$ref": "#/components/schemas/SessionResources" + started: + format: date-time + nullable: true + type: string + status: + "$ref": "#/components/schemas/SessionStatus" + url: + type: string + project_id: + $ref: "#/components/schemas/Ulid" + launcher_id: + $ref: "#/components/schemas/Ulid" + resource_class_id: + type: integer + required: + - image + - name + - resources + - started + - status + - url + - project_id + - launcher_id + - resource_class_id + type: object + SessionListResponse: + items: + "$ref": "#/components/schemas/SessionResponse" + type: array + SessionPatchRequest: + properties: + 
resource_class_id: + type: integer + state: + enum: + - running + - hibernated + type: string + SessionStatus: + properties: + message: + type: string + state: + enum: + - running + - starting + - stopping + - failed + - hibernated + type: string + will_hibernate_at: + format: date-time + nullable: true + type: string + will_delete_at: + format: date-time + nullable: true + type: string + ready_containers: + type: integer + minimum: 0 + total_containers: + type: integer + minimum: 0 + required: + - state + - ready_containers + - total_containers + type: object + SessionResources: + properties: + requests: + "$ref": "#/components/schemas/SessionResourcesRequests" + type: object + SessionResourcesRequests: + properties: + cpu: + type: number + description: Fractional CPUs + gpu: + type: integer + description: Number of GPUs used + memory: + type: integer + description: Ammount of RAM for the session, in gigabytes + storage: + type: integer + description: The size of disk storage for the session, in gigabytes + example: + cpu: 1.5 + memory: 1 + storage: 40 + gpu: 0 + type: object + SessionLogsResponse: + type: object + additionalProperties: + type: string + example: + "container-A": "Log line 1\nLog line 2" + "container-B": "Log line 1\nLog line 2" + Ulid: + description: ULID identifier + type: string + minLength: 26 + maxLength: 26 + pattern: "^[0-7][0-9A-HJKMNP-TV-Z]{25}$" + SessionCloudStoragePostList: + type: array + items: + "$ref": "#/components/schemas/SessionCloudStoragePost" + SessionCloudStoragePost: + type: object + properties: + configuration: + type: object + additionalProperties: true + readonly: + type: boolean + default: true + source_path: + type: string + target_path: + type: string + storage_id: + allOf: + - "$ref": "#/components/schemas/Ulid" + - description: The storage ID is used to know which storage config from the DB should be overriden + required: + - configuration + - source_path + - target_path + - storage_id + ServerName: + type: string + minLength: 5 + # The actual limit from k8s is 63 characters but we leave some leeway in case we need to make changes + # Note that changing this should be compatible with the methods that generate server names in the code + maxLength: 50 + pattern: '^[a-z]([-a-z0-9]*[a-z0-9])?$' + example: d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08 responses: Error: description: The schema for all 4xx and 5xx responses diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py b/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py index e18d4e7c1..861f05ec6 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/cloudstorage.py @@ -3,23 +3,25 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. 
from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main(server: "UserServer") -> list[dict[str, Any]]: """Cloud storage patches.""" cloud_storage_patches: list[dict[str, Any]] = [] cloud_storage_request: ICloudStorageRequest if not server.cloudstorage: return [] + repositories = await server.repositories() for i, cloud_storage_request in enumerate(server.cloudstorage): cloud_storage_patches.extend( cloud_storage_request.get_manifest_patch( f"{server.server_name}-ds-{i}", server.k8s_client.preferred_namespace ) ) - if server.repositories: + if repositories: cloud_storage_patches.append( { "type": "application/json-patch+json", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/general.py b/components/renku_data_services/notebooks/api/amalthea_patches/general.py index 897858cac..12dcc0434 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/general.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/general.py @@ -2,9 +2,8 @@ from typing import TYPE_CHECKING, Any -from renku_data_services.notebooks.api.classes.user import RegisteredUser - if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -31,7 +30,7 @@ def session_tolerations(server: "UserServer") -> list[dict[str, Any]]: "op": "add", "path": "/statefulset/spec/template/spec/tolerations", "value": default_tolerations - + [i.json_match_expression() for i in server.server_options.tolerations], + + [toleration.json_match_expression() for toleration in server.server_options.tolerations], } ], } @@ -159,7 +158,7 @@ def test(server: "UserServer") -> list[dict[str, Any]]: # does not use all containers. 
container_names = ( server.config.sessions.containers.registered[:2] - if isinstance(server.user, RegisteredUser) + if server.user.is_authenticated else server.config.sessions.containers.anonymous[:1] ) for container_ind, container_name in enumerate(container_names): @@ -181,7 +180,7 @@ def test(server: "UserServer") -> list[dict[str, Any]]: def oidc_unverified_email(server: "UserServer") -> list[dict[str, Any]]: """Allow users whose email is unverified in Keycloak to still be able to access their sessions.""" patches = [] - if isinstance(server.user, RegisteredUser): + if server.user.is_authenticated: # modify oauth2 proxy to accept users whose email has not been verified # usually enabled for dev purposes patches.append( diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py b/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py index a12f3c628..38f738321 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/git_proxy.py @@ -4,58 +4,111 @@ from dataclasses import asdict from typing import TYPE_CHECKING, Any +from kubernetes import client + +from renku_data_services.base_models.core import AnonymousAPIUser, AuthenticatedAPIUser from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import AnonymousUser +from renku_data_services.notebooks.api.classes.repository import GitProvider, Repository +from renku_data_services.notebooks.config import NotebooksConfig if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main_container( + user: AnonymousAPIUser | AuthenticatedAPIUser, + config: NotebooksConfig, + repositories: list[Repository], + git_providers: list[GitProvider], +) -> client.V1Container | None: """The patch that adds the git proxy container to a session statefulset.""" - user_is_anonymous = isinstance(server.user, AnonymousUser) - if user_is_anonymous or not server.repositories: - return [] + if not user.is_authenticated or not repositories or user.access_token is None or user.refresh_token is None: + return None etc_cert_volume_mount = get_certificates_volume_mounts( - server.config, + config, custom_certs=False, etc_certs=True, read_only_etc_certs=True, ) - patches = [] prefix = "GIT_PROXY_" env = [ - {"name": f"{prefix}PORT", "value": str(server.config.sessions.git_proxy.port)}, - {"name": f"{prefix}HEALTH_PORT", "value": str(server.config.sessions.git_proxy.health_port)}, - { - "name": f"{prefix}ANONYMOUS_SESSION", - "value": "true" if user_is_anonymous else "false", - }, - {"name": f"{prefix}RENKU_ACCESS_TOKEN", "value": str(server.user.access_token)}, - {"name": f"{prefix}RENKU_REFRESH_TOKEN", "value": str(server.user.refresh_token)}, - {"name": f"{prefix}RENKU_REALM", "value": server.config.keycloak_realm}, - { - "name": f"{prefix}RENKU_CLIENT_ID", - "value": str(server.config.sessions.git_proxy.renku_client_id), + client.V1EnvVar(name=f"{prefix}PORT", value=str(config.sessions.git_proxy.port)), + client.V1EnvVar(name=f"{prefix}HEALTH_PORT", value=str(config.sessions.git_proxy.health_port)), + client.V1EnvVar( + name=f"{prefix}ANONYMOUS_SESSION", + value="false" if user.is_authenticated else "true", + ), + 
client.V1EnvVar(name=f"{prefix}RENKU_ACCESS_TOKEN", value=str(user.access_token)), + client.V1EnvVar(name=f"{prefix}RENKU_REFRESH_TOKEN", value=str(user.refresh_token)), + client.V1EnvVar(name=f"{prefix}RENKU_REALM", value=config.keycloak_realm), + client.V1EnvVar( + name=f"{prefix}RENKU_CLIENT_ID", + value=str(config.sessions.git_proxy.renku_client_id), + ), + client.V1EnvVar( + name=f"{prefix}RENKU_CLIENT_SECRET", + value=str(config.sessions.git_proxy.renku_client_secret), + ), + client.V1EnvVar(name=f"{prefix}RENKU_URL", value="https://" + config.sessions.ingress.host), + client.V1EnvVar( + name=f"{prefix}REPOSITORIES", + value=json.dumps([asdict(repo) for repo in repositories]), + ), + client.V1EnvVar( + name=f"{prefix}PROVIDERS", + value=json.dumps( + [dict(id=provider.id, access_token_url=provider.access_token_url) for provider in git_providers] + ), + ), + ] + container = client.V1Container( + image=config.sessions.git_proxy.image, + security_context={ + "fsGroup": 100, + "runAsGroup": 1000, + "runAsUser": 1000, + "allowPrivilegeEscalation": False, + "runAsNonRoot": True, }, - { - "name": f"{prefix}RENKU_CLIENT_SECRET", - "value": str(server.config.sessions.git_proxy.renku_client_secret), + name="git-proxy", + env=env, + liveness_probe={ + "httpGet": { + "path": "/health", + "port": config.sessions.git_proxy.health_port, + }, + "initialDelaySeconds": 3, }, - {"name": f"{prefix}RENKU_URL", "value": "https://" + server.config.sessions.ingress.host}, - { - "name": f"{prefix}REPOSITORIES", - "value": json.dumps([asdict(repo) for repo in server.repositories]), + readiness_probe={ + "httpGet": { + "path": "/health", + "port": config.sessions.git_proxy.health_port, + }, + "initialDelaySeconds": 3, }, - { - "name": f"{prefix}PROVIDERS", - "value": json.dumps( - [dict(id=provider.id, access_token_url=provider.access_token_url) for provider in server.git_providers] - ), + volume_mounts=etc_cert_volume_mount, + resources={ + "requests": {"memory": "16Mi", "cpu": "50m"}, }, - ] + ) + return container + + +async def main(server: "UserServer") -> list[dict[str, Any]]: + """The patch that adds the git proxy container to a session statefulset.""" + repositories = await server.repositories() + if not server.user.is_authenticated or not repositories: + return [] + + git_providers = await server.git_providers() + container = await main_container(server.user, server.config, repositories, git_providers) + if not container: + return [] + + patches = [] patches.append( { @@ -64,37 +117,8 @@ def main(server: "UserServer") -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/containers/-", - "value": { - "image": server.config.sessions.git_proxy.image, - "securityContext": { - "fsGroup": 100, - "runAsGroup": 1000, - "runAsUser": 1000, - "allowPrivilegeEscalation": False, - "runAsNonRoot": True, - }, - "name": "git-proxy", - "env": env, - "livenessProbe": { - "httpGet": { - "path": "/health", - "port": server.config.sessions.git_proxy.health_port, - }, - "initialDelaySeconds": 3, - }, - "readinessProbe": { - "httpGet": { - "path": "/health", - "port": server.config.sessions.git_proxy.health_port, - }, - "initialDelaySeconds": 3, - }, - "volumeMounts": etc_cert_volume_mount, - "resources": { - "requests": {"memory": "16Mi", "cpu": "50m"}, - }, - }, - } + "value": client.ApiClient().sanitize_for_serialization(container), + }, ], } ) diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py 
b/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py index 05d067ae5..ceac8e248 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/git_sidecar.py @@ -3,18 +3,18 @@ import os from typing import TYPE_CHECKING, Any -from renku_data_services.notebooks.api.classes.user import RegisteredUser - if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer -def main(server: "UserServer") -> list[dict[str, Any]]: +async def main(server: "UserServer") -> list[dict[str, Any]]: """Adds the git sidecar container to the session statefulset.""" # NOTE: Sessions can be persisted only for registered users - if not isinstance(server.user, RegisteredUser): + if not server.user.is_authenticated: return [] - if not server.repositories: + repositories = await server.repositories() + if not repositories: return [] gitlab_project = getattr(server, "gitlab_project", None) @@ -22,7 +22,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: commit_sha = getattr(server, "commit_sha", None) volume_mount = { - "mountPath": server.work_dir.absolute().as_posix(), + "mountPath": server.work_dir.as_posix(), "name": "workspace", } if gl_project_path: @@ -51,7 +51,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: "env": [ { "name": "GIT_RPC_MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, { "name": "GIT_RPC_PORT", @@ -91,7 +91,7 @@ def main(server: "UserServer") -> list[dict[str, Any]]: }, { "name": "RENKU_USERNAME", - "value": f"{server.user.username}", + "value": f"{server.user.id}", }, { "name": "GIT_RPC_GIT_PROXY_HEALTH_PORT", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py b/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py index a28823517..638dd6171 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/init_containers.py @@ -3,24 +3,157 @@ import json import os from dataclasses import asdict -from pathlib import Path +from pathlib import Path, PurePosixPath from typing import TYPE_CHECKING, Any -from gitlab.v4.objects.users import CurrentUser from kubernetes import client +from renku_data_services.base_models.core import AnonymousAPIUser, AuthenticatedAPIUser from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.api.classes.repository import GitProvider, Repository +from renku_data_services.notebooks.config import NotebooksConfig if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. 
from renku_data_services.notebooks.api.classes.server import UserServer -def git_clone(server: "UserServer") -> list[dict[str, Any]]: - """Adds the patch for the init container that clones the git repository.""" - if not server.repositories: - return [] +async def git_clone_container_v2( + user: AuthenticatedAPIUser | AnonymousAPIUser, + config: NotebooksConfig, + repositories: list[Repository], + git_providers: list[GitProvider], + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, + lfs_auto_fetch: bool = False, +) -> dict[str, Any] | None: + """Returns the specification for the container that clones the user's repositories for new operator.""" + amalthea_session_work_volume: str = "amalthea-volume" + if not repositories: + return None + + etc_cert_volume_mount = get_certificates_volume_mounts( + config, + custom_certs=False, + etc_certs=True, + read_only_etc_certs=True, + ) + + prefix = "GIT_CLONE_" + env = [ + {"name": f"{prefix}WORKSPACE_MOUNT_PATH", "value": workspace_mount_path.as_posix()}, + { + "name": f"{prefix}MOUNT_PATH", + "value": work_dir.as_posix(), + }, + { + "name": f"{prefix}LFS_AUTO_FETCH", + "value": "1" if lfs_auto_fetch else "0", + }, + { + "name": f"{prefix}USER__USERNAME", + "value": user.email, + }, + { + "name": f"{prefix}USER__RENKU_TOKEN", + "value": str(user.access_token), + }, + {"name": f"{prefix}IS_GIT_PROXY_ENABLED", "value": "0" if user.is_anonymous else "1"}, + { + "name": f"{prefix}SENTRY__ENABLED", + "value": str(config.sessions.git_clone.sentry.enabled).lower(), + }, + { + "name": f"{prefix}SENTRY__DSN", + "value": config.sessions.git_clone.sentry.dsn, + }, + { + "name": f"{prefix}SENTRY__ENVIRONMENT", + "value": config.sessions.git_clone.sentry.env, + }, + { + "name": f"{prefix}SENTRY__SAMPLE_RATE", + "value": str(config.sessions.git_clone.sentry.sample_rate), + }, + {"name": "SENTRY_RELEASE", "value": os.environ.get("SENTRY_RELEASE")}, + { + "name": "REQUESTS_CA_BUNDLE", + "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), + }, + { + "name": "SSL_CERT_FILE", + "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), + }, + ] + if user.is_authenticated: + if user.email: + env.append( + {"name": f"{prefix}USER__EMAIL", "value": user.email}, + ) + full_name = user.get_full_name() + if full_name: + env.append( + { + "name": f"{prefix}USER__FULL_NAME", + "value": full_name, + }, + ) + + # Set up git repositories + for idx, repo in enumerate(repositories): + obj_env = f"{prefix}REPOSITORIES_{idx}_" + env.append( + { + "name": obj_env, + "value": json.dumps(asdict(repo)), + } + ) + + # Set up git providers + required_provider_ids: set[str] = {r.provider for r in repositories if r.provider} + required_git_providers = [p for p in git_providers if p.id in required_provider_ids] + for idx, provider in enumerate(required_git_providers): + obj_env = f"{prefix}GIT_PROVIDERS_{idx}_" + data = dict(id=provider.id, access_token_url=provider.access_token_url) + env.append( + { + "name": obj_env, + "value": json.dumps(data), + } + ) + + return { + "image": config.sessions.git_clone.image, + "name": "git-clone", + "resources": { + "requests": { + "cpu": "100m", + "memory": "100Mi", + } + }, + "securityContext": { + "allowPrivilegeEscalation": False, + "fsGroup": 100, + "runAsGroup": 100, + "runAsUser": 1000, + "runAsNonRoot": True, + }, + "volumeMounts": [ + { + "mountPath": workspace_mount_path.as_posix(), + "name": amalthea_session_work_volume, + }, + *etc_cert_volume_mount, + ], + "env": env, + } 
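For orientation, a minimal, self-contained sketch of the indexed GIT_CLONE_REPOSITORIES_* environment variables that git_clone_container_v2 above assembles for the git-clone init container. This is not part of the patch; the simplified Repository dataclass here (with only url and provider fields) is an assumption for illustration, standing in for the real class in renku_data_services.notebooks.api.classes.repository.

import json
from dataclasses import asdict, dataclass


@dataclass
class Repository:
    # Simplified stand-in; only the fields needed for this sketch are assumed here.
    url: str
    provider: str | None = None


prefix = "GIT_CLONE_"
repositories = [Repository(url="https://gitlab.example.com/group/project.git", provider="gitlab")]

env: list[dict[str, str]] = []
for idx, repo in enumerate(repositories):
    # One JSON-encoded environment variable per repository, mirroring the loop above.
    env.append({"name": f"{prefix}REPOSITORIES_{idx}_", "value": json.dumps(asdict(repo))})

print(env)
# [{'name': 'GIT_CLONE_REPOSITORIES_0_',
#   'value': '{"url": "https://gitlab.example.com/group/project.git", "provider": "gitlab"}'}]

Encoding each repository as its own indexed, JSON-valued variable appears to let the init container discover its inputs by scanning the environment, without any mounted config file; the same indexed pattern is reused for GIT_CLONE_GIT_PROVIDERS_* below.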
+ + +async def git_clone_container(server: "UserServer") -> dict[str, Any] | None: + """Returns the specification for the container that clones the user's repositories.""" + repositories = await server.repositories() + if not repositories: + return None etc_cert_volume_mount = get_certificates_volume_mounts( server.config, @@ -29,16 +162,15 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: read_only_etc_certs=True, ) - user_is_anonymous = isinstance(server.user, AnonymousUser) prefix = "GIT_CLONE_" env = [ { "name": f"{prefix}WORKSPACE_MOUNT_PATH", - "value": server.workspace_mount_path.absolute().as_posix(), + "value": server.workspace_mount_path.as_posix(), }, { "name": f"{prefix}MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, { "name": f"{prefix}LFS_AUTO_FETCH", @@ -46,13 +178,13 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: }, { "name": f"{prefix}USER__USERNAME", - "value": server.user.username, + "value": server.user.email, }, { "name": f"{prefix}USER__RENKU_TOKEN", "value": str(server.user.access_token), }, - {"name": f"{prefix}IS_GIT_PROXY_ENABLED", "value": "0" if user_is_anonymous else "1"}, + {"name": f"{prefix}IS_GIT_PROXY_ENABLED", "value": "0" if server.user.is_anonymous else "1"}, { "name": f"{prefix}SENTRY__ENABLED", "value": str(server.config.sessions.git_clone.sentry.enabled).lower(), @@ -79,21 +211,22 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: "value": str(Path(etc_cert_volume_mount[0]["mountPath"]) / "ca-certificates.crt"), }, ] - if ( - isinstance(server.user, RegisteredUser) - and isinstance(server.user.gitlab_user, CurrentUser) - and not user_is_anonymous - ): - env += [ - {"name": f"{prefix}USER__EMAIL", "value": server.user.gitlab_user.email}, - { - "name": f"{prefix}USER__FULL_NAME", - "value": server.user.gitlab_user.name, - }, - ] + if server.user.is_authenticated: + if server.user.email: + env.append( + {"name": f"{prefix}USER__EMAIL", "value": server.user.email}, + ) + full_name = server.user.get_full_name() + if full_name: + env.append( + { + "name": f"{prefix}USER__FULL_NAME", + "value": full_name, + }, + ) # Set up git repositories - for idx, repo in enumerate(server.repositories): + for idx, repo in enumerate(repositories): obj_env = f"{prefix}REPOSITORIES_{idx}_" env.append( { @@ -103,7 +236,8 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: ) # Set up git providers - for idx, provider in enumerate(server.required_git_providers): + required_git_providers = await server.required_git_providers() + for idx, provider in enumerate(required_git_providers): obj_env = f"{prefix}GIT_PROVIDERS_{idx}_" data = dict(id=provider.id, access_token_url=provider.access_token_url) env.append( @@ -113,6 +247,38 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: } ) + return { + "image": server.config.sessions.git_clone.image, + "name": "git-clone", + "resources": { + "requests": { + "cpu": "100m", + "memory": "100Mi", + } + }, + "securityContext": { + "allowPrivilegeEscalation": False, + "fsGroup": 100, + "runAsGroup": 100, + "runAsUser": 1000, + "runAsNonRoot": True, + }, + "volumeMounts": [ + { + "mountPath": server.workspace_mount_path.as_posix(), + "name": "workspace", + }, + *etc_cert_volume_mount, + ], + "env": env, + } + + +async def git_clone(server: "UserServer") -> list[dict[str, Any]]: + """The patch for the init container that clones the git repository.""" + container = await git_clone_container(server) + if not container: + 
return [] return [ { "type": "application/json-patch+json", @@ -120,39 +286,15 @@ def git_clone(server: "UserServer") -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/initContainers/-", - "value": { - "image": server.config.sessions.git_clone.image, - "name": "git-clone", - "resources": { - "requests": { - "cpu": "100m", - "memory": "100Mi", - } - }, - "securityContext": { - "allowPrivilegeEscalation": False, - "fsGroup": 100, - "runAsGroup": 100, - "runAsUser": 1000, - "runAsNonRoot": True, - }, - "volumeMounts": [ - { - "mountPath": server.workspace_mount_path.absolute().as_posix(), - "name": "workspace", - }, - *etc_cert_volume_mount, - ], - "env": env, - }, + "value": container, }, ], } ] -def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: - """Add a container that initializes custom certificate authorities for a session.""" +def certificates_container(config: NotebooksConfig) -> tuple[client.V1Container, list[client.V1Volume]]: + """The specification for the container that setups self signed CAs.""" init_container = client.V1Container( name="init-certificates", image=config.sessions.ca_certs.image, @@ -175,12 +317,18 @@ def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: projected=client.V1ProjectedVolumeSource( default_mode=440, sources=[ - {"secret": {"name": i.get("secret")}} - for i in config.sessions.ca_certs.secrets - if isinstance(i, dict) and i.get("secret") is not None + {"secret": {"name": secret.get("secret")}} + for secret in config.sessions.ca_certs.secrets + if isinstance(secret, dict) and secret.get("secret") is not None ], ), ) + return (init_container, [volume_etc_certs, volume_custom_certs]) + + +def certificates(config: NotebooksConfig) -> list[dict[str, Any]]: + """Add a container that initializes custom certificate authorities for a session.""" + container, vols = certificates_container(config) api_client = client.ApiClient() patches = [ { @@ -189,35 +337,28 @@ def certificates(config: _NotebooksConfig) -> list[dict[str, Any]]: { "op": "add", "path": "/statefulset/spec/template/spec/initContainers/-", - "value": api_client.sanitize_for_serialization(init_container), - }, - ], - }, - { - "type": "application/json-patch+json", - "patch": [ - { - "op": "add", - "path": "/statefulset/spec/template/spec/volumes/-", - "value": api_client.sanitize_for_serialization(volume_etc_certs), - }, - ], - }, - { - "type": "application/json-patch+json", - "patch": [ - { - "op": "add", - "path": "/statefulset/spec/template/spec/volumes/-", - "value": api_client.sanitize_for_serialization(volume_custom_certs), + "value": api_client.sanitize_for_serialization(container), }, ], }, ] + for vol in vols: + patches.append( + { + "type": "application/json-patch+json", + "patch": [ + { + "op": "add", + "path": "/statefulset/spec/template/spec/volumes/-", + "value": api_client.sanitize_for_serialization(vol), + }, + ], + }, + ) return patches -def download_image(server: "UserServer") -> list[dict[str, Any]]: +def download_image_container(server: "UserServer") -> client.V1Container: """Adds a container that does not do anything but simply downloads the session image at startup.""" container = client.V1Container( name="download-image", @@ -231,6 +372,12 @@ def download_image(server: "UserServer") -> list[dict[str, Any]]: } }, ) + return container + + +def download_image(server: "UserServer") -> list[dict[str, Any]]: + """Adds a container that does not do anything but simply downloads the session image at startup.""" + container = 
download_image_container(server) api_client = client.ApiClient() return [ { diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py b/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py index c120b6abb..e46707dc8 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/inject_certificates.py @@ -4,9 +4,9 @@ from typing import TYPE_CHECKING, Any from renku_data_services.notebooks.api.amalthea_patches.utils import get_certificates_volume_mounts -from renku_data_services.notebooks.api.classes.user import RegisteredUser if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -31,7 +31,7 @@ def proxy(server: "UserServer") -> list[dict[str, Any]]: ], }, ] - if isinstance(server.user, RegisteredUser): + if server.user.is_authenticated: patches.append( { "type": "application/json-patch+json", diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py b/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py index 6ad0b3cb0..6f7affc09 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/jupyter_server.py @@ -5,13 +5,13 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from gitlab.v4.objects.users import CurrentUser from kubernetes import client -from renku_data_services.notebooks.api.classes.user import RegisteredUser +from renku_data_services.base_models.core import AuthenticatedAPIUser from renku_data_services.notebooks.errors.user import OverriddenEnvironmentVariableError if TYPE_CHECKING: + # NOTE: If these are directly imported then you get circular imports. from renku_data_services.notebooks.api.classes.server import UserServer @@ -30,7 +30,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/env/-", "value": { "name": "RENKU_USERNAME", - "value": server.user.username, + "value": server.user.id, }, }, { @@ -43,7 +43,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/env/-", "value": { "name": "NOTEBOOK_DIR", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, }, { @@ -53,7 +53,7 @@ def env(server: "UserServer") -> list[dict[str, Any]]: # relative to $HOME. 
"value": { "name": "MOUNT_PATH", - "value": server.work_dir.absolute().as_posix(), + "value": server.work_dir.as_posix(), }, }, { @@ -109,21 +109,17 @@ def args() -> list[dict[str, Any]]: return patches -def image_pull_secret(server: "UserServer") -> list[dict[str, Any]]: +def image_pull_secret(server: "UserServer", access_token: str | None) -> list[dict[str, Any]]: """Adds an image pull secret to the session if the session image is not public.""" patches = [] - if ( - isinstance(server.user, RegisteredUser) - and isinstance(server.user.gitlab_user, CurrentUser) - and server.is_image_private - ): + if isinstance(server.user, AuthenticatedAPIUser) and server.is_image_private and access_token: image_pull_secret_name = server.server_name + "-image-secret" registry_secret = { "auths": { server.config.git.registry: { "Username": "oauth2", - "Password": server.user.git_token, - "Email": server.user.gitlab_user.email, + "Password": access_token, + "Email": server.user.email, } } } @@ -227,7 +223,7 @@ def rstudio_env_variables(server: "UserServer") -> list[dict[str, Any]]: "path": "/statefulset/spec/template/spec/containers/0/volumeMounts/-", "value": { "name": secret_name, - "mountPath": mount_location.absolute().as_posix(), + "mountPath": mount_location.as_posix(), "subPath": mount_location.name, "readOnly": True, }, diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/ssh.py b/components/renku_data_services/notebooks/api/amalthea_patches/ssh.py index da565966d..9bb3d99e3 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/ssh.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/ssh.py @@ -2,10 +2,10 @@ from typing import Any -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.config import NotebooksConfig -def main(config: _NotebooksConfig) -> list[dict[str, Any]]: +def main(config: NotebooksConfig) -> list[dict[str, Any]]: """Adds the required configuration to the session statefulset for SSH access.""" if not config.sessions.ssh.enabled: return [] diff --git a/components/renku_data_services/notebooks/api/amalthea_patches/utils.py b/components/renku_data_services/notebooks/api/amalthea_patches/utils.py index fccd7ac58..650970977 100644 --- a/components/renku_data_services/notebooks/api/amalthea_patches/utils.py +++ b/components/renku_data_services/notebooks/api/amalthea_patches/utils.py @@ -4,11 +4,11 @@ from kubernetes import client -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.config import NotebooksConfig def get_certificates_volume_mounts( - config: _NotebooksConfig, + config: NotebooksConfig, etc_certs: bool = True, custom_certs: bool = True, read_only_etc_certs: bool = False, diff --git a/components/renku_data_services/notebooks/api/classes/cloud_storage/__init__.py b/components/renku_data_services/notebooks/api/classes/cloud_storage/__init__.py index 015653284..a66b2728d 100644 --- a/components/renku_data_services/notebooks/api/classes/cloud_storage/__init__.py +++ b/components/renku_data_services/notebooks/api/classes/cloud_storage/__init__.py @@ -6,17 +6,15 @@ class ICloudStorageRequest(Protocol): """The abstract class for cloud storage.""" - exists: bool mount_folder: str - source_folder: str - bucket: str + source_path: str def get_manifest_patch( self, base_name: str, namespace: str, - labels: dict[str, str] = {}, - annotations: dict[str, str] = {}, + labels: dict[str, str] | None = None, + annotations: dict[str, 
str] | None = None, ) -> list[dict[str, Any]]: """The patches applied to a jupyter server to insert the storage in the session.""" ... diff --git a/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py b/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py index 07da3ab5c..b3abeac84 100644 --- a/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py +++ b/components/renku_data_services/notebooks/api/classes/cloud_storage/existing.py @@ -1,7 +1,10 @@ """Cloud storage.""" from dataclasses import dataclass -from typing import Any, Self +from typing import Any, Self, cast + +from renku_data_services.errors import errors +from renku_data_services.notebooks.crs import JupyterServerV1Alpha1 @dataclass @@ -12,11 +15,13 @@ class ExistingCloudStorage: type: str @classmethod - def from_manifest(cls, manifest: dict[str, Any], storage_class: str = "csi-rclone") -> list[Self]: + def from_manifest(cls, manifest: JupyterServerV1Alpha1, storage_class: str = "csi-rclone") -> list[Self]: """The patches applied to a jupyter server to insert the storage in the session.""" + if manifest.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") output: list[Self] = [] - for patch_collection in manifest["spec"]["patches"]: - for patch in patch_collection["patch"]: + for patch_collection in manifest.spec.patches: + for patch in cast(list[dict[str, Any]], patch_collection.patch): if patch["op"] == "test": continue if not isinstance(patch["value"], dict): diff --git a/components/renku_data_services/notebooks/api/classes/data_service.py b/components/renku_data_services/notebooks/api/classes/data_service.py index e2f1c7973..4e7b6d44c 100644 --- a/components/renku_data_services/notebooks/api/classes/data_service.py +++ b/components/renku_data_services/notebooks/api/classes/data_service.py @@ -4,11 +4,20 @@ from typing import Any, NamedTuple, Optional, cast from urllib.parse import urljoin, urlparse -import requests +import httpx from sanic.log import logger +from renku_data_services.base_models import APIUser +from renku_data_services.crc.db import ResourcePoolRepository +from renku_data_services.crc.models import ResourceClass, ResourcePool +from renku_data_services.notebooks.api.classes.repository import ( + INTERNAL_GITLAB_PROVIDER, + GitProvider, + OAuth2Connection, + OAuth2Provider, +) +from renku_data_services.notebooks.api.schemas.server_options import ServerOptions from renku_data_services.notebooks.errors.intermittent import IntermittentError -from renku_data_services.notebooks.errors.programming import ConfigurationError from renku_data_services.notebooks.errors.user import ( AuthenticationError, InvalidCloudStorageConfiguration, @@ -16,10 +25,6 @@ MissingResourceError, ) -from ..schemas.server_options import ServerOptions -from .repository import INTERNAL_GITLAB_PROVIDER, GitProvider, OAuth2Connection, OAuth2Provider -from .user import User - class CloudStorageConfig(NamedTuple): """Cloud storage configuration.""" @@ -40,18 +45,21 @@ class StorageValidator: def __post_init__(self) -> None: self.storage_url = self.storage_url.rstrip("/") - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get a specific cloud storage configuration by ID.""" headers = None - if user is not None and user.access_token is not None and 
user.git_token is not None: + if user is not None and user.access_token is not None and internal_gitlab_user.access_token is not None: headers = { "Authorization": f"bearer {user.access_token}", - "Gitlab-Access-Token": user.git_token, + "Gitlab-Access-Token": user.access_token, } # TODO: remove project_id once authz on the data service works properly request_url = self.storage_url + f"/storage/{storage_id}?project_id={project_id}" logger.info(f"getting storage info by id: {request_url}") - res = requests.get(request_url, headers=headers, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, headers=headers, timeout=10) if res.status_code == 404: raise MissingResourceError(message=f"Couldn't find cloud storage with id {storage_id}") if res.status_code == 401: @@ -69,9 +77,10 @@ def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> Clo name=storage["name"], ) - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate the cloud storage configuration.""" - res = requests.post(self.storage_url + "/storage_schema/validate", json=configuration, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.post(self.storage_url + "/storage_schema/validate", json=configuration, timeout=10) if res.status_code == 422: raise InvalidCloudStorageConfiguration( message=f"The provided cloud storage configuration isn't valid: {res.json()}", @@ -81,9 +90,10 @@ def validate_storage_configuration(self, configuration: dict[str, Any], source_p message="The data service sent an unexpected response, please try again later", ) - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscures password fields for use with rclone.""" - res = requests.post(self.storage_url + "/storage_schema/obscure", json=configuration, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.post(self.storage_url + "/storage_schema/obscure", json=configuration, timeout=10) if res.status_code != 200: raise InvalidCloudStorageConfiguration( @@ -97,15 +107,17 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class DummyStorageValidator: """Dummy cloud storage validator used for testing.""" - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get storage by ID.""" raise NotImplementedError() - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate the cloud storage configuration.""" raise NotImplementedError() - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscure the password fields in a cloud storage configuration.""" raise NotImplementedError() @@ -114,14 +126,11 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class CRCValidator: """Calls to the CRC service to 
validate resource requests.""" - crc_url: str - - def __post_init__(self) -> None: - self.crc_url = self.crc_url.rstrip("/") + rp_repo: ResourcePoolRepository - def validate_class_storage( + async def validate_class_storage( self, - user: User, + user: APIUser, class_id: int, storage: Optional[int] = None, ) -> ServerOptions: @@ -129,105 +138,83 @@ def validate_class_storage( Storage in memory are assumed to be in gigabytes. """ - resource_pools = self._get_resource_pools(user=user) - pool = None - res_class = None + resource_pools = await self.rp_repo.get_resource_pools(user) + pool: ResourcePool | None = None + res_class: ResourceClass | None = None for rp in resource_pools: - for cls in rp["classes"]: - if cls["id"] == class_id: + for cls in rp.classes: + if cls.id == class_id: res_class = cls pool = rp break if pool is None or res_class is None: raise InvalidComputeResourceError(message=f"The resource class ID {class_id} does not exist.") if storage is None: - storage = res_class.get("default_storage", 1) + storage = res_class.default_storage if storage < 1: raise InvalidComputeResourceError(message="Storage requests have to be greater than or equal to 1GB.") - if storage > res_class.get("max_storage"): + if storage > res_class.max_storage: raise InvalidComputeResourceError(message="The requested storage surpasses the maximum value allowed.") options = ServerOptions.from_resource_class(res_class) - options.idle_threshold_seconds = pool.get("idle_threshold") - options.hibernation_threshold_seconds = pool.get("hibernation_threshold") + options.idle_threshold_seconds = pool.idle_threshold + options.hibernation_threshold_seconds = pool.hibernation_threshold options.set_storage(storage, gigabytes=True) - quota = pool.get("quota") - if quota is not None and isinstance(quota, dict): - options.priority_class = quota.get("id") + quota = pool.quota + if quota is not None: + options.priority_class = quota.id return options - def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class from the default resource pool.""" - pools = self._get_resource_pools() - default_pools = [p for p in pools if p.get("default", False)] - if len(default_pools) < 1: - raise ConfigurationError("Cannot find the default resource pool.") - default_pool = default_pools[0] - default_classes: list[dict[str, Any]] = [ - cls for cls in default_pool.get("classes", []) if cls.get("default", False) - ] - if len(default_classes) < 1: - raise ConfigurationError("Cannot find the default resource class.") - return default_classes[0] - - def find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + return await self.rp_repo.get_default_resource_class() + + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find a resource class greater than or equal to the old-style server options being requested. Only classes available to the user are considered. 
""" - resource_pools = self._get_resource_pools(user=user, server_options=requested_server_options) + resource_pools = await self._get_resource_pools(user=user, server_options=requested_server_options) # Difference and best candidate in the case that the resource class will be # greater than or equal to the request best_larger_or_equal_diff: ServerOptions | None = None best_larger_or_equal_class: ServerOptions | None = None zero_diff = ServerOptions(cpu=0, memory=0, gpu=0, storage=0) for resource_pool in resource_pools: - quota = resource_pool.get("quota") - for resource_class in resource_pool["classes"]: + quota = resource_pool.quota + for resource_class in resource_pool.classes: resource_class_mdl = ServerOptions.from_resource_class(resource_class) - if quota is not None and isinstance(quota, dict): - resource_class_mdl.priority_class = quota.get("id") + if quota is not None: + resource_class_mdl.priority_class = quota.id diff = resource_class_mdl - requested_server_options if ( diff >= zero_diff and (best_larger_or_equal_diff is None or diff < best_larger_or_equal_diff) - and resource_class["matching"] + and resource_class.matching ): best_larger_or_equal_diff = diff best_larger_or_equal_class = resource_class_mdl return best_larger_or_equal_class - def _get_resource_pools( + async def _get_resource_pools( self, - user: Optional[User] = None, + user: APIUser, server_options: Optional[ServerOptions] = None, - ) -> list[dict[str, Any]]: - headers = None - params = None - if user is not None and user.access_token is not None: - headers = {"Authorization": f"bearer {user.access_token}"} + ) -> list[ResourcePool]: + output: list[ResourcePool] = [] if server_options is not None: - max_storage: float | int = 1 - if server_options.storage is not None: - max_storage = ( - server_options.storage - if server_options.gigabytes - else round(server_options.storage / 1_000_000_000) - ) - params = { - "cpu": server_options.cpu, - "gpu": server_options.gpu, - "memory": ( - server_options.memory if server_options.gigabytes else round(server_options.memory / 1_000_000_000) - ), - "max_storage": max_storage, - } - res = requests.get(self.crc_url + "/resource_pools", headers=headers, params=params, timeout=10) - if res.status_code != 200: - raise IntermittentError( - message="The compute resource access control service sent " - "an unexpected response, please try again later", + options_gb = server_options.to_gigabytes() + output = await self.rp_repo.filter_resource_pools( + user, + cpu=options_gb.cpu, + memory=round(options_gb.memory), + max_storage=round(options_gb.storage or 1), + gpu=options_gb.gpu, ) - return cast(list[dict[str, Any]], res.json()) + else: + output = await self.rp_repo.filter_resource_pools(user) + return output @dataclass @@ -236,24 +223,26 @@ class DummyCRCValidator: options: ServerOptions = field(default_factory=lambda: ServerOptions(0.5, 1, 0, 1, "/lab", False, True)) - def validate_class_storage(self, user: User, class_id: int, storage: int | None = None) -> ServerOptions: + async def validate_class_storage(self, user: APIUser, class_id: int, storage: int | None = None) -> ServerOptions: """Validate the storage against the resource class.""" return self.options - def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class.""" - return { - "name": "resource class", - "cpu": 0.1, - "memory": 1, - "gpu": 0, - "max_storage": 100, - "default_storage": 1, - "id": 1, - "default": True, - } - - def 
find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + return ResourceClass( + name="resource class", + cpu=0.1, + memory=1, + max_storage=100, + gpu=0, + id=1, + default_storage=1, + default=True, + ) + + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find an acceptable resource class based on the required options.""" return self.options @@ -270,16 +259,16 @@ def __post_init__(self) -> None: self.service_url = self.service_url.rstrip("/") self.renku_url = self.renku_url.rstrip("/") - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get the providers for the specific user.""" if user is None or user.access_token is None: return [] - connections = self.get_oauth2_connections(user=user) + connections = await self.get_oauth2_connections(user=user) providers: dict[str, GitProvider] = dict() for c in connections: if c.provider_id in providers: continue - provider = self.get_oauth2_provider(c.provider_id) + provider = await self.get_oauth2_provider(c.provider_id) access_token_url = urljoin( self.renku_url, urlparse(f"{self.service_url}/oauth2/connections/{c.id}/token").path, @@ -305,23 +294,25 @@ def get_providers(self, user: User) -> list[GitProvider]: ) return providers_list - def get_oauth2_connections(self, user: User | None = None) -> list[OAuth2Connection]: + async def get_oauth2_connections(self, user: APIUser | None = None) -> list[OAuth2Connection]: """Get oauth2 connections.""" if user is None or user.access_token is None: return [] request_url = f"{self.service_url}/oauth2/connections" headers = {"Authorization": f"bearer {user.access_token}"} - res = requests.get(request_url, headers=headers, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, headers=headers, timeout=10) if res.status_code != 200: raise IntermittentError(message="The data service sent an unexpected response, please try again later") connections = res.json() connections = [OAuth2Connection.from_dict(c) for c in connections if c["status"] == "connected"] return connections - def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: + async def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: """Get a specific provider.""" request_url = f"{self.service_url}/oauth2/providers/{provider_id}" - res = requests.get(request_url, timeout=10) + async with httpx.AsyncClient() as client: + res = await client.get(request_url, timeout=10) if res.status_code != 200: raise IntermittentError(message="The data service sent an unexpected response, please try again later") provider = res.json() @@ -332,6 +323,6 @@ def get_oauth2_provider(self, provider_id: str) -> OAuth2Provider: class DummyGitProviderHelper: """Helper for git providers.""" - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get a list of providers.""" return [] diff --git a/components/renku_data_services/notebooks/api/classes/image.py b/components/renku_data_services/notebooks/api/classes/image.py index 6a38aaf7f..6ced400eb 100644 --- a/components/renku_data_services/notebooks/api/classes/image.py +++ b/components/renku_data_services/notebooks/api/classes/image.py @@ -4,7 +4,7 @@ import re from dataclasses import dataclass, field from enum import Enum -from pathlib import Path +from pathlib import PurePosixPath 
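# Illustrative aside, not part of the patch: the switch from Path to PurePosixPath
# matters because container image workdirs are POSIX paths regardless of the host
# OS, and a pure path never touches the local filesystem. A hypothetical check:
#
#   from pathlib import PurePosixPath
#   workdir = PurePosixPath("/home/jovyan") / "work"
#   assert workdir.as_posix() == "/home/jovyan/work"   # separators stay "/"
#
# whereas a concrete Path instantiated on a Windows host would render the same
# path with backslashes.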
from typing import Any, Optional, Self, cast import requests @@ -101,7 +101,7 @@ def get_image_config(self, image: "Image") -> Optional[dict[str, Any]]: return None return cast(dict[str, Any], res.json()) - def image_workdir(self, image: "Image") -> Optional[Path]: + def image_workdir(self, image: "Image") -> Optional[PurePosixPath]: """Query the docker API to get the workdir of an image.""" config = self.get_image_config(image) if config is None: @@ -112,7 +112,7 @@ def image_workdir(self, image: "Image") -> Optional[Path]: workdir = nested_config.get("WorkingDir", "/") if workdir == "": workdir = "/" - return Path(workdir) + return PurePosixPath(workdir) def with_oauth2_token(self, oauth2_token: str) -> "ImageRepoDockerAPI": """Return a docker API instance with the token as authentication.""" diff --git a/components/renku_data_services/notebooks/api/classes/k8s_client.py b/components/renku_data_services/notebooks/api/classes/k8s_client.py index 26b7f4d4b..6abef4217 100644 --- a/components/renku_data_services/notebooks/api/classes/k8s_client.py +++ b/components/renku_data_services/notebooks/api/classes/k8s_client.py @@ -1,241 +1,216 @@ -"""An abstraction over the k8s client and the k8s-watcher.""" +"""An abstraction over the kr8s kubernetes client and the k8s-watcher.""" import base64 import json -from typing import Any, Optional, cast +import logging +from contextlib import suppress +from typing import Any, Generic, Optional, TypeVar, cast from urllib.parse import urljoin -import requests -from kubernetes import client -from kubernetes.client.exceptions import ApiException -from kubernetes.client.models import V1Container, V1DeleteOptions -from kubernetes.config import load_config -from kubernetes.config.config_exception import ConfigException -from kubernetes.config.incluster_config import SERVICE_CERT_FILENAME, SERVICE_TOKEN_FILENAME, InClusterConfigLoader -from sanic.log import logger +import httpx +from kr8s import NotFoundError, ServerError +from kr8s.asyncio.objects import APIObject, Pod, Secret, StatefulSet +from kubernetes.client import ApiClient, V1Container, V1Secret -from ...errors.intermittent import ( +from renku_data_services.errors import errors +from renku_data_services.notebooks.api.classes.auth import GitlabToken, RenkuTokens +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1, JupyterServerV1Alpha1 +from renku_data_services.notebooks.errors.intermittent import ( CannotStartServerError, DeleteServerError, IntermittentError, JSCacheError, PatchServerError, ) -from ...errors.programming import ProgrammingError -from ...errors.user import MissingResourceError -from ...util.kubernetes_ import find_env_var -from ...util.retries import retry_with_exponential_backoff -from .auth import GitlabToken, RenkuTokens +from renku_data_services.notebooks.errors.programming import ProgrammingError +from renku_data_services.notebooks.errors.user import MissingResourceError +from renku_data_services.notebooks.util.kubernetes_ import find_env_var +from renku_data_services.notebooks.util.retries import ( + retry_with_exponential_backoff_async, +) + +sanitize_for_serialization = ApiClient().sanitize_for_serialization + + +# NOTE The type ignore below is because the kr8s library has no type stubs, they claim pyright better handles type hints +class AmaltheaSessionV1Alpha1Kr8s(APIObject): # type: ignore + """Spec for amalthea sessions used by the k8s client.""" + + kind: str = "AmaltheaSession" + version: str = "amalthea.dev/v1alpha1" + namespaced: bool = True + plural: str 
= "amaltheasessions" + singular: str = "amaltheasession" + scalable: bool = False + endpoint: str = "amaltheasessions" + + +# NOTE The type ignore below is because the kr8s library has no type stubs, they claim pyright better handles type hints +class JupyterServerV1Alpha1Kr8s(APIObject): # type: ignore + """Spec for jupyter servers used by the k8s client.""" + + kind: str = "JupyterServer" + version: str = "amalthea.dev/v1alpha1" + namespaced: bool = True + plural: str = "jupyterservers" + singular: str = "jupyterserver" + scalable: bool = False + endpoint: str = "jupyterservers" + +_SessionType = TypeVar("_SessionType", JupyterServerV1Alpha1, AmaltheaSessionV1Alpha1) +_Kr8sType = TypeVar("_Kr8sType", JupyterServerV1Alpha1Kr8s, AmaltheaSessionV1Alpha1Kr8s) -class NamespacedK8sClient: + +class NamespacedK8sClient(Generic[_SessionType, _Kr8sType]): """A kubernetes client that operates in a specific namespace.""" - def __init__( - self, - namespace: str, - amalthea_group: str, - amalthea_version: str, - amalthea_plural: str, - ): + def __init__(self, namespace: str, server_type: type[_SessionType], kr8s_type: type[_Kr8sType]): self.namespace = namespace - self.amalthea_group = amalthea_group - self.amalthea_version = amalthea_version - self.amalthea_plural = amalthea_plural - # NOTE: Try to load in-cluster config first, if that fails try to load kube config - try: - InClusterConfigLoader( - token_filename=SERVICE_TOKEN_FILENAME, - cert_filename=SERVICE_CERT_FILENAME, - ).load_and_set() - except ConfigException: - load_config() - self._custom_objects = client.CustomObjectsApi(client.ApiClient()) - self._custom_objects_patch = client.CustomObjectsApi(client.ApiClient()) - self._custom_objects_patch.api_client.set_default_header("Content-Type", "application/json-patch+json") - self._core_v1 = client.CoreV1Api() - self._apps_v1 = client.AppsV1Api() - - def _get_container_logs( - self, pod_name: str, container_name: str, max_log_lines: Optional[int] = None - ) -> Optional[str]: - try: - logs = cast( - str, - self._core_v1.read_namespaced_pod_log( - pod_name, - self.namespace, - container=container_name, - tail_lines=max_log_lines, - timestamps=True, - ), - ) - except ApiException as err: - if err.status in [400, 404]: - return None # container does not exist or is not ready yet - else: - raise IntermittentError(f"Logs cannot be read for pod {pod_name}, container {container_name}.") - else: - return logs - - def get_pod_logs(self, name: str, containers: list[str], max_log_lines: Optional[int] = None) -> dict[str, str]: + self.server_type: type[_SessionType] = server_type + self._kr8s_type: type[_Kr8sType] = kr8s_type + if (self.server_type == AmaltheaSessionV1Alpha1 and self._kr8s_type == JupyterServerV1Alpha1Kr8s) or ( + self.server_type == JupyterServerV1Alpha1 and self._kr8s_type == AmaltheaSessionV1Alpha1Kr8s + ): + raise errors.ProgrammingError(message="Incompatible manifest and client types in k8s client") + self.sanitize = ApiClient().sanitize_for_serialization + + async def get_pod_logs(self, name: str, max_log_lines: Optional[int] = None) -> dict[str, str]: """Get the logs of all containers in the session.""" - output = {} + pod = cast(Pod, await Pod.get(name=name, namespace=self.namespace)) + logs: dict[str, str] = {} + containers = [container.name for container in pod.spec.containers + pod.spec.get("initContainers", [])] for container in containers: - logs = self._get_container_logs(pod_name=name, container_name=container, max_log_lines=max_log_lines) - if logs: - output[container] = 
logs - return output + try: + # NOTE: calling pod.logs without a container name set crashes the library + clogs: list[str] = [clog async for clog in pod.logs(container=container, tail_lines=max_log_lines)] + except NotFoundError: + raise errors.MissingResourceError(message=f"The session pod {name} does not exist.") + except ServerError as err: + if err.status == 404: + raise errors.MissingResourceError(message=f"The session pod {name} does not exist.") + raise + else: + logs[container] = "\n".join(clogs) + return logs - def get_secret(self, name: str) -> Optional[dict[str, Any]]: + async def get_secret(self, name: str) -> Secret | None: """Read a specific secret from the cluster.""" try: - secret = cast(dict[str, Any], self._core_v1.read_namespaced_secret(name, self.namespace)) - except client.rest.ApiException: + secret = await Secret.get(name, self.namespace) + except NotFoundError: return None return secret - def create_server(self, manifest: dict[str, Any]) -> dict[str, Any]: + async def create_server(self, manifest: _SessionType) -> _SessionType: """Create a jupyter server in the cluster.""" - server_name = manifest.get("metadata", {}).get("name") + # NOTE: You have to exclude none when using model dump below because otherwise we get + # namespace=null which seems to break the kr8s client or simply k8s does not translate + # namespace = null to the default namespace. + manifest.metadata.namespace = self.namespace + js = await self._kr8s_type(manifest.model_dump(exclude_none=True, mode="json")) + server_name = manifest.metadata.name try: - self._custom_objects.create_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - body=manifest, - ) - except ApiException as e: - logger.exception(f"Cannot start server {server_name} because of {e}") + await js.create() + except ServerError as e: + logging.exception(f"Cannot start server {server_name} because of {e}") raise CannotStartServerError( message=f"Cannot start the session {server_name}", ) + # NOTE: If refresh is not called then upon creating the object the status is blank + await js.refresh() # NOTE: We wait for the cache to sync with the newly created server # If not then the user will get a non-null response from the POST request but # then immediately after a null response because the newly created server has # not made it into the cache. With this we wait for the cache to catch up # before we send the response from the POST request out. Exponential backoff # is used to avoid overwhelming the cache. - server = retry_with_exponential_backoff(lambda x: x is None)(self.get_server)(server_name) + server = await retry_with_exponential_backoff_async(lambda x: x is None)(self.get_server)(server_name) if server is None: raise CannotStartServerError(message=f"Cannot start the session {server_name}") return server - def patch_server(self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]]) -> dict[str, Any]: + async def patch_server(self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]]) -> _SessionType: """Patch the server.""" + server = await self._kr8s_type(dict(metadata=dict(name=server_name, namespace=self.namespace))) + patch_type: str | None = None # rfc7386 patch + if isinstance(patch, list): + patch_type = "json" # rfc6902 patch try: - if isinstance(patch, list): # noqa: SIM108 - # NOTE: The _custom_objects_patch will only accept rfc6902 json-patch. 
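# Illustrative aside, not part of the patch: the two patch content types named in
# these comments differ only in shape. With type=None the body is treated as an
# RFC 7386 merge patch, e.g.
#   {"spec": {"jupyterServer": {"image": "registry.example.org/img:tag"}}}
# while a list is sent as an RFC 6902 json-patch with type="json", e.g.
#   [{"op": "replace", "path": "/spec/jupyterServer/image",
#     "value": "registry.example.org/img:tag"}]
# which is why the new patch_server above keys off isinstance(patch, list).
# (registry.example.org/img:tag is a placeholder image reference.)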
- # We can recognize the type of patch because this is the only one that uses a list - client = self._custom_objects_patch - else: - # NOTE: The _custom_objects will accept the usual rfc7386 merge patches - client = self._custom_objects - - server = cast( - dict[str, Any], - client.patch_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - name=server_name, - body=patch, - ), - ) - - except ApiException as e: - logger.exception(f"Cannot patch server {server_name} because of {e}") + await server.patch(patch, type=patch_type) + except ServerError as e: + logging.exception(f"Cannot patch server {server_name} because of {e}") raise PatchServerError() - return server + return self.server_type.model_validate(server.to_dict()) - def patch_statefulset( - self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] | client.V1StatefulSet - ) -> client.V1StatefulSet | None: + async def patch_statefulset( + self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] + ) -> StatefulSet | None: """Patch a statefulset.""" + sts = await StatefulSet(dict(metadata=dict(name=server_name, namespace=self.namespace))) + patch_type: str | None = None # rfc7386 patch + if isinstance(patch, list): + patch_type = "json" # rfc6902 patch try: - ss = self._apps_v1.patch_namespaced_stateful_set( - server_name, - self.namespace, - patch, - ) - except ApiException as err: + await sts.patch(patch, type=patch_type) + except ServerError as err: if err.status == 404: # NOTE: It can happen potentially that another request or something else # deleted the session as this request was going on, in this case we ignore # the missing statefulset return None raise - return ss + return sts - def delete_server(self, server_name: str, forced: bool = False) -> Any: + async def delete_server(self, server_name: str) -> None: """Delete the server.""" + server = await self._kr8s_type(dict(metadata=dict(name=server_name, namespace=self.namespace))) try: - status = self._custom_objects.delete_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - name=server_name, - grace_period_seconds=0 if forced else None, - body=V1DeleteOptions(propagation_policy="Foreground"), - ) - except ApiException as e: - logger.exception(f"Cannot delete server {server_name} because of {e}") + await server.delete(propagation_policy="Foreground") + except ServerError as e: + logging.exception(f"Cannot delete server {server_name} because of {e}") raise DeleteServerError() - return status + return None - def get_server(self, name: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str) -> _SessionType | None: """Get a specific JupyterServer object.""" try: - js = cast( - dict[str, Any], - self._custom_objects.get_namespaced_custom_object( - name=name, - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - ), - ) - except ApiException as err: + server = await self._kr8s_type.get(name=name, namespace=self.namespace) + except NotFoundError: + return None + except ServerError as err: if err.status not in [400, 404]: - logger.exception(f"Cannot get server {name} because of {err}") + logging.exception(f"Cannot get server {name} because of {err}") raise IntermittentError(f"Cannot get server {name} from the k8s API.") return None - return js + return 
self.server_type.model_validate(server.to_dict()) - def list_servers(self, label_selector: Optional[str] = None) -> list[dict[str, Any]]: + async def list_servers(self, label_selector: Optional[str] = None) -> list[_SessionType]: """Get a list of k8s jupyterserver objects for a specific user.""" try: - jss = self._custom_objects.list_namespaced_custom_object( - group=self.amalthea_group, - version=self.amalthea_version, - namespace=self.namespace, - plural=self.amalthea_plural, - label_selector=label_selector, - ) - except ApiException as err: + servers = await self._kr8s_type.list(namespace=self.namespace, label_selector=label_selector) + except ServerError as err: if err.status not in [400, 404]: - logger.exception(f"Cannot list servers because of {err}") + logging.exception(f"Cannot list servers because of {err}") raise IntermittentError(f"Cannot list servers from the k8s API with selector {label_selector}.") return [] - return cast(list[dict[str, Any]], jss.get("items", [])) + output: list[_SessionType] = ( + [self.server_type.model_validate(servers.to_dict())] + if isinstance(servers, APIObject) + else [self.server_type.model_validate(server.to_dict()) for server in servers] + ) + return output - def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) -> None: + async def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) -> None: """Patch the image pull secret used in a Renku session.""" secret_name = f"{server_name}-image-secret" try: - secret = self._core_v1.read_namespaced_secret(secret_name, self.namespace) - except ApiException as err: - if err.status == 404: - # NOTE: In many cases the session does not have an image pull secret - # this happens when the repo for the project is public so images are public - return - raise - old_docker_config = json.loads(base64.b64decode(secret.data[".dockerconfigjson"]).decode()) + secret = cast(Secret, await Secret.get(name=secret_name, namespace=self.namespace)) + except NotFoundError: + return None + secret_data = secret.data.to_dict() + old_docker_config = json.loads(base64.b64decode(secret_data[".dockerconfigjson"]).decode()) hostname = next(iter(old_docker_config["auths"].keys()), None) if not hostname: raise ProgrammingError( @@ -259,26 +234,19 @@ def patch_image_pull_secret(self, server_name: str, gitlab_token: GitlabToken) - "value": base64.b64encode(json.dumps(new_docker_config).encode()).decode(), } ] - self._core_v1.patch_namespaced_secret( - secret_name, - self.namespace, - patch, - ) + await secret.patch(patch, type="json") - def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None: + async def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None: """Patch the Renku and Gitlab access tokens that are used in the session statefulset.""" try: - sts = self._apps_v1.read_namespaced_stateful_set(name, self.namespace) - except ApiException as err: - if err.status == 404: - # NOTE: It can happen potentially that another request or something else - # deleted the session as this request was going on, in this case we ignore - # the missing statefulset - return - raise + sts = cast(StatefulSet, await StatefulSet.get(name=name, namespace=self.namespace)) + except NotFoundError: + return None - containers: list[V1Container] = sts.spec.template.spec.containers - init_containers: list[V1Container] = sts.spec.template.spec.init_containers + containers: list[V1Container] = [V1Container(**container) for container in sts.spec.template.spec.containers] + 
init_containers: list[V1Container] = [ + V1Container(**container) for container in sts.spec.template.spec.init_containers + ] git_proxy_container_index, git_proxy_container = next( ((i, c) for i, c in enumerate(containers) if c.name == "git-proxy"), @@ -359,50 +327,64 @@ def patch_statefulset_tokens(self, name: str, renku_tokens: RenkuTokens) -> None ) if not patches: - return + return None - self._apps_v1.patch_namespaced_stateful_set( - name, - self.namespace, - patches, - ) + await sts.patch(patches, type="json") + + async def create_secret(self, secret: V1Secret) -> V1Secret: + """Create a new secret.""" + + new_secret = await Secret(self.sanitize(secret), self.namespace) + await new_secret.create() + return V1Secret(metadata=new_secret.metadata, data=new_secret.data, type=new_secret.raw.get("type")) + async def delete_secret(self, name: str) -> None: + """Delete a secret.""" + secret = await Secret(dict(metadata=dict(name=name, namespace=self.namespace))) + with suppress(NotFoundError): + await secret.delete() + return None -class JsServerCache: + +class ServerCache(Generic[_SessionType]): """Utility class for calling the jupyter server cache.""" - def __init__(self, url: str): + def __init__(self, url: str, server_type: type[_SessionType]): self.url = url + self.client = httpx.AsyncClient() + self.server_type: type[_SessionType] = server_type + self.url_path_name = "servers" + if server_type == AmaltheaSessionV1Alpha1: + self.url_path_name = "sessions" - def list_servers(self, safe_username: str) -> list[dict[str, Any]]: + async def list_servers(self, safe_username: str) -> list[_SessionType]: """List the jupyter servers.""" - url = urljoin(self.url, f"/users/{safe_username}/servers") + url = urljoin(self.url, f"/users/{safe_username}/{self.url_path_name}") try: - res = requests.get(url, timeout=10) - res.raise_for_status() - except requests.HTTPError as err: - logger.warning( + res = await self.client.get(url, timeout=10) + except httpx.RequestError as err: + logging.warning(f"Jupyter server cache at {url} cannot be reached: {err}") + raise JSCacheError("The jupyter server cache is not available") + if res.status_code != 200: + logging.warning( f"Listing servers at {url} from " f"jupyter server cache failed with status code: {res.status_code} " - f"and error: {err}" + f"and body: {res.text}" ) - raise JSCacheError(f"The JSCache produced an unexpected status code: {err}") from err - except requests.RequestException as err: - logger.warning(f"Jupyter server cache at {url} cannot be reached: {err}") - raise JSCacheError("The jupyter server cache is not available") from err + raise JSCacheError(f"The JSCache produced an unexpected status code: {res.status_code}") - return cast(list[dict[str, Any]], res.json()) + return [self.server_type.model_validate(server) for server in res.json()] - def get_server(self, name: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str) -> _SessionType | None: """Get a specific jupyter server.""" - url = urljoin(self.url, f"/servers/{name}") + url = urljoin(self.url, f"/{self.url_path_name}/{name}") try: - res = requests.get(url, timeout=10) - except requests.exceptions.RequestException as err: - logger.warning(f"Jupyter server cache at {url} cannot be reached: {err}") + res = await self.client.get(url, timeout=10) + except httpx.RequestError as err: + logging.warning(f"Jupyter server cache at {url} cannot be reached: {err}") raise JSCacheError("The jupyter server cache is not available") if res.status_code != 200: - logger.warning( + 
logging.warning( f"Reading server at {url} from " f"jupyter server cache failed with status code: {res.status_code} " f"and body: {res.text}" @@ -413,163 +395,120 @@ def get_server(self, name: str) -> Optional[dict[str, Any]]: return None if len(output) > 1: raise ProgrammingError(f"Expected to find 1 server when getting server {name}, " f"found {len(output)}.") - return cast(dict[str, Any], output[0]) + return self.server_type.model_validate(output[0]) -class K8sClient: +class K8sClient(Generic[_SessionType, _Kr8sType]): """The K8s client that combines a namespaced client and a jupyter server cache.""" def __init__( self, - js_cache: JsServerCache, - renku_ns_client: NamespacedK8sClient, + cache: ServerCache[_SessionType], + renku_ns_client: NamespacedK8sClient[_SessionType, _Kr8sType], username_label: str, - session_ns_client: Optional[NamespacedK8sClient] = None, ): - self.js_cache = js_cache - self.renku_ns_client = renku_ns_client + self.cache: ServerCache[_SessionType] = cache + self.renku_ns_client: NamespacedK8sClient[_SessionType, _Kr8sType] = renku_ns_client self.username_label = username_label - self.session_ns_client = session_ns_client if not self.username_label: raise ProgrammingError("username_label has to be provided to K8sClient") + self.sanitize = self.renku_ns_client.sanitize - def list_servers(self, safe_username: str) -> list[dict[str, Any]]: + async def list_servers(self, safe_username: str) -> list[_SessionType]: """Get a list of servers that belong to a user. Attempt to use the cache first but if the cache fails then use the k8s API. """ try: - return self.js_cache.list_servers(safe_username) + return await self.cache.list_servers(safe_username) except JSCacheError: - logger.warning(f"Skipping the cache to list servers for user: {safe_username}") + logging.warning(f"Skipping the cache to list servers for user: {safe_username}") label_selector = f"{self.username_label}={safe_username}" - return self.renku_ns_client.list_servers(label_selector) + ( - self.session_ns_client.list_servers(label_selector) if self.session_ns_client is not None else [] - ) + return await self.renku_ns_client.list_servers(label_selector) - def get_server(self, name: str, safe_username: str) -> Optional[dict[str, Any]]: + async def get_server(self, name: str, safe_username: str) -> _SessionType | None: """Attempt to get a specific server by name from the cache. If the request to the cache fails, fallback to the k8s API. 
""" server = None try: - server = self.js_cache.get_server(name) + server = await self.cache.get_server(name) except JSCacheError: - output = [] - res = None - if self.session_ns_client is not None: - res = self.session_ns_client.get_server(name) - if res: - output.append(res) - res = self.renku_ns_client.get_server(name) - if res: - output.append(res) - if len(output) > 1: - raise ProgrammingError( - "Expected less than two results for searching for " f"server {name}, but got {len(output)}" - ) - if len(output) == 0: - return None - server = output[0] + server = await self.renku_ns_client.get_server(name) - if server and server.get("metadata", {}).get("labels", {}).get(self.username_label) != safe_username: + if server and server.metadata and server.metadata.labels.get(self.username_label) != safe_username: return None return server - def get_server_logs( + async def get_server_logs( self, server_name: str, safe_username: str, max_log_lines: Optional[int] = None ) -> dict[str, str]: """Get the logs from the server.""" - server = self.get_server(server_name, safe_username) - if server is None: + # NOTE: this get_server ensures the user has access to the server without it you could read someone elses logs + server = await self.get_server(server_name, safe_username) + if not server: raise MissingResourceError( - f"Cannot find server {server_name} for user {safe_username} to read the logs from." + f"Cannot find server {server_name} for user " f"{safe_username} to retrieve logs." ) - containers = list(server.get("status", {}).get("containerStates", {}).get("init", {}).keys()) + list( - server.get("status", {}).get("containerStates", {}).get("regular", {}).keys() - ) - namespace = server.get("metadata", {}).get("namespace") pod_name = f"{server_name}-0" - if namespace == self.renku_ns_client.namespace: - return self.renku_ns_client.get_pod_logs(pod_name, containers, max_log_lines) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user {safe_username} to read the logs from." 
- ) - return self.session_ns_client.get_pod_logs(pod_name, containers, max_log_lines) + return await self.renku_ns_client.get_pod_logs(pod_name, max_log_lines) - def get_secret(self, name: str) -> Optional[dict[str, Any]]: + async def _get_secret(self, name: str) -> Secret | None: """Get a specific secret.""" - if self.session_ns_client is not None: - secret = self.session_ns_client.get_secret(name) - if secret: - return secret - return self.renku_ns_client.get_secret(name) + return await self.renku_ns_client.get_secret(name) - def create_server(self, manifest: dict[str, Any], safe_username: str) -> dict[str, Any]: + async def create_server(self, manifest: _SessionType, safe_username: str) -> _SessionType: """Create a server.""" - server_name = manifest.get("metadata", {}).get("name") - server = self.get_server(server_name, safe_username) + server_name = manifest.metadata.name + server = await self.get_server(server_name, safe_username) if server: # NOTE: server already exists return server - if not self.session_ns_client: - return self.renku_ns_client.create_server(manifest) - return self.session_ns_client.create_server(manifest) + manifest.metadata.labels[self.username_label] = safe_username + return await self.renku_ns_client.create_server(manifest) - def patch_server( + async def patch_server( self, server_name: str, safe_username: str, patch: dict[str, Any] | list[dict[str, Any]] - ) -> dict[str, Any]: + ) -> _SessionType: """Patch a server.""" - server = self.get_server(server_name, safe_username) + server = await self.get_server(server_name, safe_username) if not server: raise MissingResourceError( f"Cannot find server {server_name} for user " f"{safe_username} in order to patch it." ) + return await self.renku_ns_client.patch_server(server_name=server_name, patch=patch) - namespace = server.get("metadata", {}).get("namespace") - - if namespace == self.renku_ns_client.namespace: - return self.renku_ns_client.patch_server(server_name=server_name, patch=patch) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to patch it." - ) - return self.session_ns_client.patch_server(server_name=server_name, patch=patch) - - def patch_statefulset( + async def patch_statefulset( self, server_name: str, patch: dict[str, Any] | list[dict[str, Any]] - ) -> client.V1StatefulSet | None: + ) -> StatefulSet | None: """Patch a statefulset.""" - client = self.session_ns_client if self.session_ns_client else self.renku_ns_client - return client.patch_statefulset(server_name=server_name, patch=patch) + client = self.renku_ns_client + return await client.patch_statefulset(server_name=server_name, patch=patch) - def delete_server(self, server_name: str, safe_username: str, forced: bool = False) -> None: + async def delete_server(self, server_name: str, safe_username: str) -> None: """Delete the server.""" - server = self.get_server(server_name, safe_username) + server = await self.get_server(server_name, safe_username) if not server: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to delete it." - ) - namespace = server.get("metadata", {}).get("namespace") - if namespace == self.renku_ns_client.namespace: - self.renku_ns_client.delete_server(server_name, forced) - if self.session_ns_client is None: - raise MissingResourceError( - f"Cannot find server {server_name} for user " f"{safe_username} in order to delete it." 
- ) - self.session_ns_client.delete_server(server_name, forced) + return None + await self.renku_ns_client.delete_server(server_name) + return None - def patch_tokens(self, server_name: str, renku_tokens: RenkuTokens, gitlab_token: GitlabToken) -> None: + async def patch_tokens(self, server_name: str, renku_tokens: RenkuTokens, gitlab_token: GitlabToken) -> None: """Patch the Renku and Gitlab access tokens used in a session.""" - client = self.session_ns_client if self.session_ns_client else self.renku_ns_client - client.patch_statefulset_tokens(server_name, renku_tokens) - client.patch_image_pull_secret(server_name, gitlab_token) + client = self.renku_ns_client + await client.patch_statefulset_tokens(server_name, renku_tokens) + await client.patch_image_pull_secret(server_name, gitlab_token) @property def preferred_namespace(self) -> str: """Get the preferred namespace for creating jupyter servers.""" - if self.session_ns_client is not None: - return self.session_ns_client.namespace return self.renku_ns_client.namespace + + async def create_secret(self, secret: V1Secret) -> V1Secret: + """Create a secret.""" + return await self.renku_ns_client.create_secret(secret) + + async def delete_secret(self, name: str) -> None: + """Delete a secret.""" + return await self.renku_ns_client.delete_secret(name) diff --git a/components/renku_data_services/notebooks/api/classes/server.py b/components/renku_data_services/notebooks/api/classes/server.py index 50b1de0f4..3add6cec6 100644 --- a/components/renku_data_services/notebooks/api/classes/server.py +++ b/components/renku_data_services/notebooks/api/classes/server.py @@ -3,29 +3,32 @@ from abc import ABC from collections.abc import Sequence from itertools import chain -from pathlib import Path +from pathlib import PurePosixPath from typing import Any from urllib.parse import urljoin, urlparse +from gitlab.v4.objects.projects import Project from sanic.log import logger -from ...config import _NotebooksConfig -from ...errors.programming import ConfigurationError, DuplicateEnvironmentVariableError -from ...errors.user import MissingResourceError -from ..amalthea_patches import cloudstorage as cloudstorage_patches -from ..amalthea_patches import general as general_patches -from ..amalthea_patches import git_proxy as git_proxy_patches -from ..amalthea_patches import git_sidecar as git_sidecar_patches -from ..amalthea_patches import init_containers as init_containers_patches -from ..amalthea_patches import inject_certificates as inject_certificates_patches -from ..amalthea_patches import jupyter_server as jupyter_server_patches -from ..amalthea_patches import ssh as ssh_patches -from ..schemas.secrets import K8sUserSecrets -from ..schemas.server_options import ServerOptions -from .cloud_storage import ICloudStorageRequest -from .k8s_client import K8sClient -from .repository import GitProvider, Repository -from .user import AnonymousUser, RegisteredUser +from renku_data_services.base_models import AnonymousAPIUser, AuthenticatedAPIUser +from renku_data_services.base_models.core import APIUser +from renku_data_services.notebooks.api.amalthea_patches import cloudstorage as cloudstorage_patches +from renku_data_services.notebooks.api.amalthea_patches import general as general_patches +from renku_data_services.notebooks.api.amalthea_patches import git_proxy as git_proxy_patches +from renku_data_services.notebooks.api.amalthea_patches import git_sidecar as git_sidecar_patches +from renku_data_services.notebooks.api.amalthea_patches import init_containers as 
init_containers_patches +from renku_data_services.notebooks.api.amalthea_patches import inject_certificates as inject_certificates_patches +from renku_data_services.notebooks.api.amalthea_patches import jupyter_server as jupyter_server_patches +from renku_data_services.notebooks.api.amalthea_patches import ssh as ssh_patches +from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest +from renku_data_services.notebooks.api.classes.k8s_client import JupyterServerV1Alpha1Kr8s, K8sClient +from renku_data_services.notebooks.api.classes.repository import GitProvider, Repository +from renku_data_services.notebooks.api.schemas.secrets import K8sUserSecrets +from renku_data_services.notebooks.api.schemas.server_options import ServerOptions +from renku_data_services.notebooks.config import NotebooksConfig +from renku_data_services.notebooks.crs import JupyterServerV1Alpha1 +from renku_data_services.notebooks.errors.programming import DuplicateEnvironmentVariableError +from renku_data_services.notebooks.errors.user import MissingResourceError class UserServer(ABC): @@ -33,7 +36,7 @@ class UserServer(ABC): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str, image: str | None, server_options: ServerOptions, @@ -41,18 +44,18 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, - config: _NotebooksConfig, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, + config: NotebooksConfig, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, repositories: list[Repository] | None = None, ): - self._check_flask_config() self._user = user self.server_name = server_name - self._k8s_client: K8sClient = k8s_client - self.safe_username = self._user.safe_username + self._k8s_client: K8sClient[JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s] = k8s_client + self.safe_username = self._user.id self.image = image self.server_options = server_options self.environment_variables = environment_variables @@ -63,13 +66,14 @@ def __init__( self.cloudstorage = cloudstorage self.is_image_private = is_image_private self.config = config + self.internal_gitlab_user = internal_gitlab_user if self.server_options.idle_threshold_seconds is not None: self.idle_seconds_threshold = self.server_options.idle_threshold_seconds else: self.idle_seconds_threshold = ( config.sessions.culling.registered.idle_seconds - if isinstance(self._user, RegisteredUser) + if isinstance(self._user, AuthenticatedAPIUser) else config.sessions.culling.anonymous.idle_seconds ) @@ -78,7 +82,7 @@ def __init__( else: self.hibernated_seconds_threshold = ( config.sessions.culling.registered.hibernated_seconds - if isinstance(user, RegisteredUser) + if isinstance(user, AuthenticatedAPIUser) else config.sessions.culling.anonymous.hibernated_seconds ) self._repositories: list[Repository] = repositories or [] @@ -86,7 +90,7 @@ def __init__( self._has_configured_git_providers = False @property - def user(self) -> AnonymousUser | RegisteredUser: + def user(self) -> AnonymousAPIUser | AuthenticatedAPIUser: """Getter for server's user.""" return self._user @@ -95,14 +99,14 @@ def k8s_client(self) -> K8sClient: """Return server's k8s client.""" return self._k8s_client - @property - def repositories(self) -> list[Repository]: + async def repositories(self) -> list[Repository]: """Get the list of 
repositories in the project.""" # Configure git repository providers based on matching URLs. if not self._has_configured_git_providers: + git_providers = await self.git_providers() for repo in self._repositories: found_provider = None - for provider in self.git_providers: + for provider in git_providers: if urlparse(provider.url).netloc == urlparse(repo.url).netloc: found_provider = provider break @@ -115,33 +119,33 @@ def repositories(self) -> list[Repository]: @property def server_url(self) -> str: """The URL where a user can access their session.""" - if type(self._user) is RegisteredUser: + if self._user.is_authenticated: return urljoin( f"https://{self.config.sessions.ingress.host}", f"sessions/{self.server_name}", ) return urljoin( f"https://{self.config.sessions.ingress.host}", - f"sessions/{self.server_name}?token={self._user.username}", + f"sessions/{self.server_name}?token={self._user.id}", ) - @property - def git_providers(self) -> list[GitProvider]: + async def git_providers(self) -> list[GitProvider]: """The list of git providers.""" if self._git_providers is None: - self._git_providers = self.config.git_provider_helper.get_providers(user=self.user) + self._git_providers = await self.config.git_provider_helper.get_providers(user=self.user) return self._git_providers - @property - def required_git_providers(self) -> list[GitProvider]: + async def required_git_providers(self) -> list[GitProvider]: """The list of required git providers.""" - required_provider_ids: set[str] = set(r.provider for r in self.repositories if r.provider) - return [p for p in self.git_providers if p.id in required_provider_ids] + repositories = await self.repositories() + required_provider_ids: set[str] = set(r.provider for r in repositories if r.provider) + providers = await self.git_providers() + return [p for p in providers if p.id in required_provider_ids] def __str__(self) -> str: - return f"" + return f"" - def start(self) -> dict[str, Any] | None: + async def start(self) -> JupyterServerV1Alpha1 | None: """Create the jupyterserver resource in k8s.""" errors = self._get_start_errors() if errors: @@ -151,19 +155,9 @@ def start(self) -> dict[str, Any] | None: f"or Docker resources are missing: {', '.join(errors)}" ) ) - return self._k8s_client.create_server(self._get_session_manifest(), self.safe_username) - - def _check_flask_config(self) -> None: - """Check the app config and ensure minimum required parameters are present.""" - if self.config.git.url is None: - raise ConfigurationError( - message="The gitlab URL is missing, it must be provided in an environment variable called GITLAB_URL" - ) - if self.config.git.registry is None: - raise ConfigurationError( - message="The url to the docker image registry is missing, it must be provided in " - "an environment variable called IMAGE_REGISTRY" - ) + session_manifest = await self._get_session_manifest() + manifest = JupyterServerV1Alpha1.model_validate(session_manifest) + return await self._k8s_client.create_server(manifest, self.safe_username) @staticmethod def _check_environment_variables_overrides(patches_list: list[dict[str, Any]]) -> None: @@ -199,9 +193,9 @@ def _get_start_errors(self) -> list[str]: errors.append(f"image {self.image} does not exist or cannot be accessed") return errors - def _get_session_manifest(self) -> dict[str, Any]: + async def _get_session_manifest(self) -> dict[str, Any]: """Compose the body of the user session for the k8s operator.""" - patches = self._get_patches() + patches = await self._get_patches() 
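# Illustrative aside, assumed shape rather than taken verbatim from the codebase:
# each entry produced by the amalthea_patches modules is expected to pair a patch
# content type with a list of RFC 6902 operations, roughly
#   {
#       "type": "application/json-patch+json",
#       "patch": [
#           {"op": "add",
#            "path": "/statefulset/spec/template/spec/containers/-",
#            "value": {"name": "git-proxy"}},
#           {"op": "test",
#            "path": "/statefulset/spec/template/spec/containers/0/name",
#            "value": "jupyter-server"},
#       ],
#   }
# which matches how ExistingCloudStorage.from_manifest above iterates
# patch_collection.patch and skips "test" operations.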
self._check_environment_variables_overrides(patches) # Storage @@ -211,7 +205,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "pvc": { "enabled": True, "storageClassName": self.config.sessions.storage.pvs_storage_class, - "mountPath": self.workspace_mount_path.absolute().as_posix(), + "mountPath": self.workspace_mount_path.as_posix(), }, } else: @@ -220,24 +214,24 @@ def _get_session_manifest(self) -> dict[str, Any]: "size": storage_size, "pvc": { "enabled": False, - "mountPath": self.workspace_mount_path.absolute().as_posix(), + "mountPath": self.workspace_mount_path.as_posix(), }, } # Authentication - if isinstance(self._user, RegisteredUser): + if isinstance(self._user, AuthenticatedAPIUser): session_auth = { "token": "", "oidc": { "enabled": True, "clientId": self.config.sessions.oidc.client_id, "clientSecret": {"value": self.config.sessions.oidc.client_secret}, - "issuerUrl": self._user.oidc_issuer, + "issuerUrl": self.config.sessions.oidc.issuer_url, "authorizedEmails": [self._user.email], }, } else: session_auth = { - "token": self._user.username, + "token": self._user.id, "oidc": {"enabled": False}, } # Combine everything into the manifest @@ -255,7 +249,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "idleSecondsThreshold": self.idle_seconds_threshold, "maxAgeSecondsThreshold": ( self.config.sessions.culling.registered.max_age_seconds - if isinstance(self._user, RegisteredUser) + if isinstance(self._user, AuthenticatedAPIUser) else self.config.sessions.culling.anonymous.max_age_seconds ), "hibernatedSecondsThreshold": self.hibernated_seconds_threshold, @@ -263,7 +257,7 @@ def _get_session_manifest(self) -> dict[str, Any]: "jupyterServer": { "defaultUrl": self.server_options.default_url, "image": self.image, - "rootDir": self.work_dir.absolute().as_posix(), + "rootDir": self.work_dir.as_posix(), "resources": self.server_options.to_k8s_resources( enforce_cpu_limits=self.config.sessions.enforce_cpu_limits ), @@ -286,7 +280,7 @@ def _get_session_manifest(self) -> dict[str, Any]: def _get_renku_annotation_prefix(self) -> str: return self.config.session_get_endpoint_annotations.renku_annotation_prefix - def _get_patches(self) -> list[dict[str, Any]]: + async def _get_patches(self) -> list[dict[str, Any]]: return list( chain( general_patches.test(self), @@ -297,25 +291,25 @@ def _get_patches(self) -> list[dict[str, Any]]: general_patches.dev_shm(self), jupyter_server_patches.args(), jupyter_server_patches.env(self), - jupyter_server_patches.image_pull_secret(self), + jupyter_server_patches.image_pull_secret(self, self.internal_gitlab_user.access_token), jupyter_server_patches.disable_service_links(), jupyter_server_patches.rstudio_env_variables(self), jupyter_server_patches.user_secrets(self), - git_proxy_patches.main(self), - git_sidecar_patches.main(self), + await git_proxy_patches.main(self), + await git_sidecar_patches.main(self), general_patches.oidc_unverified_email(self), ssh_patches.main(self.config), # init container for certs must come before all other init containers # so that it runs first before all other init containers init_containers_patches.certificates(self.config), init_containers_patches.download_image(self), - init_containers_patches.git_clone(self), + await init_containers_patches.git_clone(self), inject_certificates_patches.proxy(self), # Cloud Storage needs to patch the git clone sidecar spec and so should come after # the sidecars # WARN: this patch depends on the index of the sidecar and so needs to be updated # if sidercars are added or removed 
- cloudstorage_patches.main(self), + await cloudstorage_patches.main(self), ) ) @@ -328,7 +322,9 @@ def get_labels(self) -> dict[str, str | None]: f"{prefix}commit-sha": None, f"{prefix}gitlabProjectId": None, f"{prefix}safe-username": self.safe_username, - f"{prefix}quota": self.server_options.priority_class, + f"{prefix}quota": self.server_options.priority_class + if self.server_options.priority_class is not None + else "", f"{prefix}userId": self._user.id, } return labels @@ -339,8 +335,8 @@ def get_annotations(self) -> dict[str, str | None]: annotations = { f"{prefix}commit-sha": None, f"{prefix}gitlabProjectId": None, - f"{prefix}safe-username": self._user.safe_username, - f"{prefix}username": self._user.username, + f"{prefix}safe-username": self._user.id, + f"{prefix}username": self._user.id, f"{prefix}userId": self._user.id, f"{prefix}servername": self.server_name, f"{prefix}branch": None, @@ -369,7 +365,7 @@ class Renku1UserServer(UserServer): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str, namespace: str, project: str, @@ -382,22 +378,26 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, - config: _NotebooksConfig, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, + config: NotebooksConfig, + gitlab_project: Project | None, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, + **_: dict, ): - gitlab_project_name = f"{namespace}/{project}" - gitlab_project = user.get_renku_project(gitlab_project_name) + self.gitlab_project = gitlab_project + self.internal_gitlab_user = internal_gitlab_user + self.gitlab_project_name = f"{namespace}/{project}" single_repository = ( Repository( - url=gitlab_project.http_url_to_repo, - dirname=gitlab_project.path, + url=self.gitlab_project.http_url_to_repo, + dirname=self.gitlab_project.path, branch=branch, commit_sha=commit_sha, ) - if gitlab_project is not None + if self.gitlab_project is not None else None ) @@ -416,6 +416,7 @@ def __init__( is_image_private=is_image_private, repositories=[single_repository] if single_repository is not None else [], config=config, + internal_gitlab_user=internal_gitlab_user, ) self.namespace = namespace @@ -424,8 +425,6 @@ def __init__( self.commit_sha = commit_sha self.notebook = notebook self.git_host = urlparse(config.git.url).netloc - self.gitlab_project_name = gitlab_project_name - self.gitlab_project = gitlab_project self.single_repository = single_repository def _get_start_errors(self) -> list[str]: @@ -494,7 +493,7 @@ class Renku2UserServer(UserServer): def __init__( self, - user: AnonymousUser | RegisteredUser, + user: AnonymousAPIUser | AuthenticatedAPIUser, image: str, project_id: str, launcher_id: str, @@ -504,12 +503,14 @@ def __init__( user_secrets: K8sUserSecrets | None, cloudstorage: Sequence[ICloudStorageRequest], k8s_client: K8sClient, - workspace_mount_path: Path, - work_dir: Path, + workspace_mount_path: PurePosixPath, + work_dir: PurePosixPath, repositories: list[Repository], - config: _NotebooksConfig, + config: NotebooksConfig, + internal_gitlab_user: APIUser, using_default_image: bool = False, is_image_private: bool = False, + **_: dict, ): super().__init__( user=user, @@ -526,11 +527,23 @@ def __init__( is_image_private=is_image_private, repositories=repositories, config=config, + internal_gitlab_user=internal_gitlab_user, ) 
        self.project_id = project_id
         self.launcher_id = launcher_id
 
+    def get_labels(self) -> dict[str, str | None]:
+        """Get the labels of the jupyter server."""
+        prefix = self._get_renku_annotation_prefix()
+        labels = super().get_labels()
+
+        # for validation purposes
+        for item in ["commit-sha", "gitlabProjectId"]:
+            labels[f"{prefix}{item}"] = ""
+
+        return labels
+
     def get_annotations(self) -> dict[str, str | None]:
         """Get the annotations of the session."""
         prefix = self._get_renku_annotation_prefix()
@@ -538,4 +551,9 @@ def get_annotations(self) -> dict[str, str | None]:
         annotations[f"{prefix}renkuVersion"] = "2.0"
         annotations[f"{prefix}projectId"] = self.project_id
         annotations[f"{prefix}launcherId"] = self.launcher_id
+
+        # for validation purposes
+        for item in ["commit-sha", "branch", "git-host", "namespace", "projectName", "gitlabProjectId", "repository"]:
+            annotations[f"{prefix}{item}"] = ""
+
         return annotations
diff --git a/components/renku_data_services/notebooks/api/classes/server_manifest.py b/components/renku_data_services/notebooks/api/classes/server_manifest.py
index 0a220b430..89eed4572 100644
--- a/components/renku_data_services/notebooks/api/classes/server_manifest.py
+++ b/components/renku_data_services/notebooks/api/classes/server_manifest.py
@@ -4,13 +4,15 @@
 import json
 from typing import Any, Optional, cast
 
-from .cloud_storage.existing import ExistingCloudStorage
+from renku_data_services.errors import errors
+from renku_data_services.notebooks.api.classes.cloud_storage.existing import ExistingCloudStorage
+from renku_data_services.notebooks.crs import JupyterServerV1Alpha1
 
 
 class UserServerManifest:
     """Thin wrapper around a jupyter server manifest."""
 
-    def __init__(self, manifest: dict[str, Any], default_image: str, pvs_enabled: bool = True) -> None:
+    def __init__(self, manifest: JupyterServerV1Alpha1, default_image: str, pvs_enabled: bool = True) -> None:
         self.manifest = manifest
         self.default_image = default_image
         self.pvs_enabled = pvs_enabled
@@ -18,12 +20,14 @@ def __init__(self, manifest: dict[str, Any], default_image: str, pvs_enabled: bo
     @property
     def name(self) -> str:
         """The name of the server."""
-        return cast(str, self.manifest["metadata"]["name"])
+        return self.manifest.metadata.name
 
     @property
     def image(self) -> str:
         """The image the server is running."""
-        return cast(str, self.manifest["spec"]["jupyterServer"]["image"])
+        if self.manifest.spec is None:
+            raise errors.ProgrammingError(message="Unexpected manifest format")
+        return self.manifest.spec.jupyterServer.image
 
     @property
     def using_default_image(self) -> bool:
@@ -31,14 +35,16 @@ def using_default_image(self) -> bool:
         return self.image == self.default_image
 
     @property
-    def server_options(self) -> dict[str, Any]:
+    def server_options(self) -> dict[str, str | int | float]:
         """Extract the server options from a manifest."""
         js = self.manifest
-        server_options = {}
+        if js.spec is None:
+            raise errors.ProgrammingError(message="Unexpected manifest format")
+        server_options: dict[str, str | int | float] = {}
         # url
-        server_options["defaultUrl"] = js["spec"]["jupyterServer"]["defaultUrl"]
+        server_options["defaultUrl"] = js.spec.jupyterServer.defaultUrl
         # disk
-        server_options["disk_request"] = js["spec"]["storage"].get("size")
+        server_options["disk_request"] = js.spec.storage.size
         # NOTE: Amalthea accepts only strings for disk request, but k8s allows bytes as number
         # so try to convert to number if possible
         with contextlib.suppress(ValueError):
@@ -50,7 +56,7 @@ def server_options(self) -> dict[str, Any]:
"cpu": "cpu_request", "ephemeral-storage": "ephemeral-storage", } - js_resources = js["spec"]["jupyterServer"]["resources"]["requests"] + js_resources = js.spec.jupyterServer.resources["requests"] for k8s_res_name in k8s_res_name_xref: if k8s_res_name in js_resources: server_options[k8s_res_name_xref[k8s_res_name]] = js_resources[k8s_res_name] @@ -60,8 +66,8 @@ def server_options(self) -> dict[str, Any]: server_options["ephemeral-storage"] if self.pvs_enabled else server_options["disk_request"] ) # lfs auto fetch - for patches in js["spec"]["patches"]: - for patch in patches.get("patch", []): + for patches in js.spec.patches: + for patch in cast(dict, patches.patch): if patch.get("path") == "/statefulset/spec/template/spec/initContainers/-": for env in patch.get("value", {}).get("env", []): if env.get("name") == "GIT_CLONE_LFS_AUTO_FETCH": @@ -71,12 +77,12 @@ def server_options(self) -> dict[str, Any]: @property def annotations(self) -> dict[str, str]: """Extract the manifest annotations.""" - return cast(dict[str, str], self.manifest["metadata"]["annotations"]) + return self.manifest.metadata.annotations @property def labels(self) -> dict[str, str]: """Extract the manifest labels.""" - return cast(dict[str, str], self.manifest["metadata"]["labels"]) + return self.manifest.metadata.labels @property def cloudstorage(self) -> list[ExistingCloudStorage]: @@ -86,12 +92,12 @@ def cloudstorage(self) -> list[ExistingCloudStorage]: @property def server_name(self) -> str: """Get the server name.""" - return cast(str, self.manifest["metadata"]["name"]) + return self.manifest.metadata.name @property def hibernation(self) -> Optional[dict[str, Any]]: """Return hibernation annotation.""" - hibernation = self.manifest["metadata"]["annotations"].get("hibernation") + hibernation = self.manifest.metadata.annotations.get("renku.io/hibernation") return json.loads(hibernation) if hibernation else None @property @@ -120,9 +126,11 @@ def hibernation_branch(self) -> Optional[str]: @property def url(self) -> str: """Return the url where the user can access the session.""" - host = self.manifest["spec"]["routing"]["host"] - path = self.manifest["spec"]["routing"]["path"].rstrip("/") - token = self.manifest["spec"]["auth"].get("token", "") + if self.manifest.spec is None: + raise errors.ProgrammingError(message="Unexpected manifest format") + host = self.manifest.spec.routing.host + path = self.manifest.spec.routing.path.rstrip("/") + token = self.manifest.spec.auth.token or "" url = f"https://{host}{path}" if token and len(token) > 0: url += f"?token={token}" diff --git a/components/renku_data_services/notebooks/api/classes/user.py b/components/renku_data_services/notebooks/api/classes/user.py index a760557d2..0700759a1 100644 --- a/components/renku_data_services/notebooks/api/classes/user.py +++ b/components/renku_data_services/notebooks/api/classes/user.py @@ -1,112 +1,18 @@ """Notebooks user model definitions.""" -import base64 -import json -import re from functools import lru_cache -from math import floor -from typing import Any, Optional, Protocol, cast -import escapism -import jwt from gitlab import Gitlab from gitlab.v4.objects.projects import Project from gitlab.v4.objects.users import CurrentUser from sanic.log import logger -from ...errors.user import AuthenticationError +class NotebooksGitlabClient: + """Client for gitlab to be used only in the notebooks, will be eventually eliminated.""" -class User(Protocol): - """Representation of a user that is calling the API.""" - - access_token: str | None = None 
- git_token: str | None = None - gitlab_client: Gitlab - username: str - - @lru_cache(maxsize=8) - def get_renku_project(self, namespace_project: str) -> Optional[Project]: - """Retrieve the GitLab project.""" - try: - return self.gitlab_client.projects.get(f"{namespace_project}") - except Exception as e: - logger.warning(f"Cannot get project: {namespace_project} for user: {self.username}, error: {e}") - return None - - -class AnonymousUser(User): - """Anonymous user.""" - - auth_header = "Renku-Auth-Anon-Id" - - def __init__(self, headers: dict, gitlab_url: str): - self.authenticated = ( - self.auth_header in headers - and headers[self.auth_header] != "" - # The anonymous id must start with an alphanumeric character - and re.match(r"^[a-zA-Z0-9]", headers[self.auth_header]) is not None - ) - if not self.authenticated: - return - self.git_url = gitlab_url - self.gitlab_client = Gitlab(self.git_url, api_version="4", per_page=50) - self.username = headers[self.auth_header] - self.safe_username = escapism.escape(self.username, escape_char="-").lower() - self.full_name = None - self.email = None - self.oidc_issuer = None - self.git_token = None - self.git_token_expires_at = 0 - self.access_token = None - self.refresh_token = None - self.id = headers[self.auth_header] - - def __str__(self) -> str: - return f"" - - -class RegisteredUser(User): - """Registered user.""" - - auth_headers = [ - "Renku-Auth-Access-Token", - "Renku-Auth-Id-Token", - ] - git_header = "Renku-Auth-Git-Credentials" - - def __init__(self, headers: dict[str, str]): - self.authenticated = all([header in headers for header in self.auth_headers]) - if not self.authenticated: - return - if not headers.get(self.git_header): - raise AuthenticationError( - "Your Gitlab credentials are invalid or expired, " - "please login Renku, or fully log out and log back in." 
- ) - - parsed_id_token = self.parse_jwt_from_headers(headers) - self.email = parsed_id_token["email"] - self.full_name = parsed_id_token["name"] - self.username = parsed_id_token["preferred_username"] - self.safe_username = escapism.escape(self.username, escape_char="-").lower() - self.oidc_issuer = parsed_id_token["iss"] - self.id = parsed_id_token["sub"] - self.access_token = headers["Renku-Auth-Access-Token"] - self.refresh_token = headers["Renku-Auth-Refresh-Token"] - - ( - self.git_url, - self.git_auth_header, - self.git_token, - self.git_token_expires_at, - ) = self.git_creds_from_headers(headers) - self.gitlab_client = Gitlab( - self.git_url, - api_version="4", - oauth_token=self.git_token, - per_page=50, - ) + def __init__(self, url: str, gitlab_token: str | None = None): + self.gitlab_client = Gitlab(url, api_version="4", oauth_token=gitlab_token, per_page=50) @property def gitlab_user(self) -> CurrentUser | None: @@ -115,43 +21,11 @@ def gitlab_user(self) -> CurrentUser | None: self.gitlab_client.auth() return self.gitlab_client.user - @staticmethod - def parse_jwt_from_headers(headers: dict[str, str]) -> dict[str, Any]: - """Parse the JWT.""" - # No need to verify the signature because this is already done by the gateway - decoded = jwt.decode(headers["Renku-Auth-Id-Token"], options={"verify_signature": False}) - decoded = cast(dict[str, Any], decoded) - return decoded - - @staticmethod - def git_creds_from_headers(headers: dict[str, str]) -> tuple[str, str, str, int]: - """Extract the git credentials from a header.""" - parsed_dict = json.loads(base64.decodebytes(headers["Renku-Auth-Git-Credentials"].encode())) - git_url, git_credentials = next(iter(parsed_dict.items())) - if not isinstance(git_url, str) or not isinstance(git_credentials, dict): - raise AuthenticationError(message="Could not successfully decode the git credentials header") - token_match = re.match(r"^[^\s]+\ ([^\s]+)$", git_credentials["AuthorizationHeader"]) - git_token = token_match.group(1) if token_match is not None else None - if not isinstance(git_token, str): - raise AuthenticationError(message="Could not successfully decode the git credentials header") - git_token_expires_at = git_credentials.get("AccessTokenExpiresAt") - if git_token_expires_at is None: - # INFO: Indicates that the token does not expire - git_token_expires_at = -1 - else: - try: - # INFO: Sometimes this can be a float, sometimes an int - git_token_expires_at = float(git_token_expires_at) - except ValueError: - git_token_expires_at = -1 - else: - git_token_expires_at = floor(git_token_expires_at) - return ( - git_url, - git_credentials["AuthorizationHeader"], - git_token, - git_token_expires_at, - ) - - def __str__(self) -> str: - return f"" + @lru_cache(maxsize=8) + def get_renku_project(self, namespace_project: str) -> Project | None: + """Retrieve the GitLab project.""" + try: + return self.gitlab_client.projects.get(f"{namespace_project}") + except Exception as e: + logger.warning(f"Cannot find the gitlab project: {namespace_project}, error: {e}") + return None diff --git a/components/renku_data_services/notebooks/api/schemas/cloud_storage.py b/components/renku_data_services/notebooks/api/schemas/cloud_storage.py index 917092552..11ee5a5ca 100644 --- a/components/renku_data_services/notebooks/api/schemas/cloud_storage.py +++ b/components/renku_data_services/notebooks/api/schemas/cloud_storage.py @@ -2,14 +2,17 @@ from configparser import ConfigParser from io import StringIO -from pathlib import Path -from typing import Any, 
Optional, Self +from pathlib import PurePosixPath +from typing import Any, Final, Optional, Self +from kubernetes import client from marshmallow import EXCLUDE, Schema, ValidationError, fields, validates_schema +from renku_data_services.base_models import APIUser from renku_data_services.notebooks.api.classes.cloud_storage import ICloudStorageRequest -from renku_data_services.notebooks.api.classes.user import User -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.config import NotebooksConfig + +_sanitize_for_serialization = client.ApiClient().sanitize_for_serialization class RCloneStorageRequest(Schema): @@ -36,6 +39,8 @@ def validate_storage(self, data: dict, **kwargs: dict) -> None: class RCloneStorage(ICloudStorageRequest): """RClone based storage.""" + pvc_secret_annotation_name: Final[str] = "csi-rclone.dev/secretName" + def __init__( self, source_path: str, @@ -43,10 +48,10 @@ def __init__( readonly: bool, mount_folder: str, name: Optional[str], - config: _NotebooksConfig, + config: NotebooksConfig, ) -> None: + """Creates a cloud storage instance without validating the configuration.""" self.config = config - self.config.storage_validator.validate_storage_configuration(configuration, source_path) self.configuration = configuration self.source_path = source_path self.mount_folder = mount_folder @@ -54,13 +59,14 @@ def __init__( self.name = name @classmethod - def storage_from_schema( + async def storage_from_schema( cls, data: dict[str, Any], - user: User, + user: APIUser, + internal_gitlab_user: APIUser, project_id: int, - work_dir: Path, - config: _NotebooksConfig, + work_dir: PurePosixPath, + config: NotebooksConfig, ) -> Self: """Create storage object from request.""" name = None @@ -76,7 +82,9 @@ def storage_from_schema( target_path, readonly, name, - ) = config.storage_validator.get_storage_by_id(user, project_id, data["storage_id"]) + ) = await config.storage_validator.get_storage_by_id( + user, internal_gitlab_user, project_id, data["storage_id"] + ) configuration = {**configuration, **(configuration or {})} readonly = readonly else: @@ -86,10 +94,76 @@ def storage_from_schema( readonly = data.get("readonly", True) mount_folder = str(work_dir / target_path) + await config.storage_validator.validate_storage_configuration(configuration, source_path) return cls(source_path, configuration, readonly, mount_folder, name, config) + def pvc( + self, + base_name: str, + namespace: str, + labels: dict[str, str] | None = None, + annotations: dict[str, str] | None = None, + ) -> client.V1PersistentVolumeClaim: + """The PVC for mounting cloud storage.""" + return client.V1PersistentVolumeClaim( + metadata=client.V1ObjectMeta( + name=base_name, + namespace=namespace, + annotations={self.pvc_secret_annotation_name: base_name} | (annotations or {}), + labels={"name": base_name} | (labels or {}), + ), + spec=client.V1PersistentVolumeClaimSpec( + access_modes=["ReadOnlyMany" if self.readonly else "ReadWriteMany"], + resources=client.V1VolumeResourceRequirements(requests={"storage": "10Gi"}), + storage_class_name=self.config.cloud_storage.storage_class, + ), + ) + + def volume_mount(self, base_name: str) -> client.V1VolumeMount: + """The volume mount for cloud storage.""" + return client.V1VolumeMount( + mount_path=self.mount_folder, + name=base_name, + read_only=self.readonly, + ) + + def volume(self, base_name: str) -> client.V1Volume: + """The volume entry for the statefulset specification.""" + return client.V1Volume( + name=base_name, + 
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource( + claim_name=base_name, read_only=self.readonly + ), + ) + + def secret( + self, + base_name: str, + namespace: str, + labels: dict[str, str] | None = None, + annotations: dict[str, str] | None = None, + ) -> client.V1Secret: + """The secret containing the configuration for the rclone csi driver.""" + return client.V1Secret( + metadata=client.V1ObjectMeta( + name=base_name, + namespace=namespace, + annotations=annotations, + labels={"name": base_name} | (labels or {}), + ), + string_data={ + "remote": self.name or base_name, + "remotePath": self.source_path, + "configData": self.config_string(self.name or base_name), + }, + ) + def get_manifest_patch( - self, base_name: str, namespace: str, labels: dict = {}, annotations: dict = {} + self, + base_name: str, + namespace: str, + labels: dict[str, str] | None = None, + annotations: dict[str, str] | None = None, ) -> list[dict[str, Any]]: """Get server manifest patch.""" patches = [] @@ -100,57 +174,22 @@ def get_manifest_patch( { "op": "add", "path": f"/{base_name}-pv", - "value": { - "apiVersion": "v1", - "kind": "PersistentVolumeClaim", - "metadata": { - "name": base_name, - "labels": {"name": base_name}, - }, - "spec": { - "accessModes": ["ReadOnlyMany" if self.readonly else "ReadWriteMany"], - "resources": {"requests": {"storage": "10Gi"}}, - "storageClassName": self.config.cloud_storage.storage_class, - }, - }, + "value": _sanitize_for_serialization(self.pvc(base_name, namespace, labels, annotations)), }, { "op": "add", "path": f"/{base_name}-secret", - "value": { - "apiVersion": "v1", - "kind": "Secret", - "metadata": { - "name": base_name, - "labels": {"name": base_name}, - }, - "type": "Opaque", - "stringData": { - "remote": self.name or base_name, - "remotePath": self.source_path, - "configData": self.config_string(self.name or base_name), - }, - }, + "value": _sanitize_for_serialization(self.secret(base_name, namespace, labels, annotations)), }, { "op": "add", "path": "/statefulset/spec/template/spec/containers/0/volumeMounts/-", - "value": { - "mountPath": self.mount_folder, - "name": base_name, - "readOnly": self.readonly, - }, + "value": _sanitize_for_serialization(self.volume_mount(base_name)), }, { "op": "add", "path": "/statefulset/spec/template/spec/volumes/-", - "value": { - "name": base_name, - "persistentVolumeClaim": { - "claimName": base_name, - "readOnly": self.readonly, - }, - }, + "value": _sanitize_for_serialization(self.volume(base_name)), }, ], } diff --git a/components/renku_data_services/notebooks/api/schemas/server_options.py b/components/renku_data_services/notebooks/api/schemas/server_options.py index c62846ef8..cdd6fc096 100644 --- a/components/renku_data_services/notebooks/api/schemas/server_options.py +++ b/components/renku_data_services/notebooks/api/schemas/server_options.py @@ -6,6 +6,7 @@ from marshmallow import Schema, fields +from renku_data_services.crc.models import ResourceClass from renku_data_services.notebooks.api.schemas.custom_fields import ByteSizeField, CpuField, GpuField from renku_data_services.notebooks.config.dynamic import CPUEnforcement from renku_data_services.notebooks.errors.programming import ProgrammingError @@ -63,12 +64,12 @@ def __post_init__(self) -> None: self.storage = 1 elif self.storage is None and not self.gigabytes: self.storage = 1_000_000_000 - if not all([isinstance(i, NodeAffinity) for i in self.node_affinities]): + if not all([isinstance(affinity, NodeAffinity) for affinity in 
self.node_affinities]): raise ProgrammingError( message="Cannot create a ServerOptions dataclass with node " "affinities that are not of type NodeAffinity" ) - if not all([isinstance(i, Toleration) for i in self.tolerations]): + if not all([isinstance(toleration, Toleration) for toleration in self.tolerations]): raise ProgrammingError( message="Cannot create a ServerOptions dataclass with tolerations that are not of type Toleration" ) @@ -176,19 +177,22 @@ def to_k8s_resources(self, enforce_cpu_limits: CPUEnforcement = CPUEnforcement.O return resources @classmethod - def from_resource_class(cls, data: dict[str, Any]) -> Self: + def from_resource_class(cls, data: ResourceClass) -> Self: """Convert a CRC resource class to server options. Data Service uses GB for storage and memory whereas the notebook service uses bytes so we convert to bytes here. """ return cls( - cpu=data["cpu"], - memory=data["memory"] * 1000000000, - gpu=data["gpu"], - storage=data["default_storage"] * 1000000000, - node_affinities=[NodeAffinity(**a) for a in data.get("node_affinities", [])], - tolerations=[Toleration(t) for t in data.get("tolerations", [])], - resource_class_id=data.get("id"), + cpu=data.cpu, + memory=data.memory * 1_000_000_000, + gpu=data.gpu, + storage=data.default_storage * 1_000_000_000, + node_affinities=[ + NodeAffinity(key=a.key, required_during_scheduling=a.required_during_scheduling) + for a in data.node_affinities + ], + tolerations=[Toleration(t) for t in data.tolerations], + resource_class_id=data.id, ) @classmethod diff --git a/components/renku_data_services/notebooks/api/schemas/servers_get.py b/components/renku_data_services/notebooks/api/schemas/servers_get.py index e1ca99d94..110356c92 100644 --- a/components/renku_data_services/notebooks/api/schemas/servers_get.py +++ b/components/renku_data_services/notebooks/api/schemas/servers_get.py @@ -11,7 +11,7 @@ from renku_data_services.notebooks.api.classes.server_manifest import UserServerManifest from renku_data_services.notebooks.api.schemas.cloud_storage import LaunchNotebookResponseCloudStorage from renku_data_services.notebooks.api.schemas.custom_fields import ByteSizeField, CpuField, GpuField, LowercaseString -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.config import NotebooksConfig from renku_data_services.notebooks.config.static import _ServersGetEndpointAnnotations @@ -124,7 +124,7 @@ class Meta: unknown = EXCLUDE - annotations = fields.Nested(_ServersGetEndpointAnnotations().schema()) + annotations = fields.Nested(_ServersGetEndpointAnnotations().schema) name = fields.Str() state = fields.Dict() started = fields.DateTime(format="iso", allow_none=True) @@ -134,7 +134,7 @@ class Meta: image = fields.Str() @staticmethod - def format_user_pod_data(server: UserServerManifest, config: _NotebooksConfig) -> dict[str, Any]: + def format_user_pod_data(server: UserServerManifest, config: NotebooksConfig) -> dict[str, Any]: """Convert and format a server manifest object into what the API requires.""" def get_failed_container_exit_code(container_status: dict[str, Any]) -> int | str: @@ -250,8 +250,8 @@ def get_unschedulable_message(pod: dict[str, Any]) -> str | None: def get_all_container_statuses(server: UserServerManifest) -> list[dict[str, Any]]: return cast( list[dict[str, Any]], - server.manifest["status"].get("mainPod", {}).get("status", {}).get("containerStatuses", []) - + server.manifest["status"].get("mainPod", {}).get("status", {}).get("initContainerStatuses", []), + 
server.manifest.status.get("mainPod", {}).get("status", {}).get("containerStatuses", []) + + server.manifest.status.get("mainPod", {}).get("status", {}).get("initContainerStatuses", []), ) def get_failed_containers(container_statuses: list[dict[str, Any]]) -> list[dict[str, Any]]: @@ -275,17 +275,17 @@ def get_starting_message(step_summary: list[dict[str, Any]]) -> str | None: def is_user_anonymous(server: UserServerManifest, prefix: str = "renku.io/") -> bool: js = server.manifest - annotations = js.get("metadata", {}).get("annotations", {}) + annotations = js.metadata.annotations return ( - str(annotations.get(f"{prefix}userId", "")).startswith("anon-") - and str(annotations.get(f"{prefix}username", "")).startswith("anon-") - and str(js.get("metadata", {}).get("name", "")).startswith("anon-") + annotations.get(f"{prefix}userId", "").startswith("anon-") + and annotations.get(f"{prefix}username", "").startswith("anon-") + and js.metadata.name.startswith("anon-") ) def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: js = server.manifest - init_container_summary = js.get("status", {}).get("containerStates", {}).get("init", {}) - container_summary = js.get("status", {}).get("containerStates", {}).get("regular", {}) + init_container_summary = js.status.get("containerStates", {}).get("init", {}) + container_summary = js.status.get("containerStates", {}).get("regular", {}) output = [] init_container_name_desc_xref = OrderedDict( [ @@ -303,7 +303,7 @@ def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: ("jupyter-server", "Starting session"), ] ) - current_state = js.get("status", {}).get("state") + current_state = js.status.get("state") if current_state is None or current_state == ServerStatusEnum.Starting.value: # NOTE: This means that the server is starting and the statuses are not populated # yet, therefore in this case we will use defaults and set all statuses to waiting @@ -341,16 +341,16 @@ def get_status_breakdown(server: UserServerManifest) -> list[dict[str, Any]]: def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[str, Any]]: """Get the status of the jupyterserver.""" - state = server.manifest.get("status", {}).get("state", ServerStatusEnum.Starting.value) + state = server.manifest.status.get("state", ServerStatusEnum.Starting.value) output = { "state": state, } container_statuses = get_all_container_statuses(server) if state == ServerStatusEnum.Failed.value: failed_container_statuses = get_failed_containers(container_statuses) - unschedulable_msg = get_unschedulable_message(server.manifest.get("status", {}).get("mainPod", {})) + unschedulable_msg = get_unschedulable_message(server.manifest.status.get("mainPod", {})) event_based_messages = [] - events = server.manifest.get("status", {}).get("events", {}) + events = server.manifest.status.get("events", {}) for component in sorted(events.keys()): message = events.get(component, {}).get("message") if message is None: @@ -376,11 +376,12 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ output["warnings"].append({"message": "Server was started using the default image."}) now = datetime.now(UTC) - annotations = server.manifest.get("metadata", {}).get("annotations", {}) + annotations = server.manifest.metadata.annotations last_activity_date_str = annotations.get("renku.io/lastActivityDate") - idle_threshold = server.manifest.get("spec", {}).get("culling", {}).get("idleSecondsThreshold", 0) + assert server.manifest.spec is not None 
+ idle_threshold = server.manifest.spec.culling.idleSecondsThreshold critical: bool = False if idle_threshold > 0 and last_activity_date_str: @@ -401,9 +402,7 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ hibernation_date_str = annotations.get("renku.io/hibernationDate") - hibernated_seconds_threshold = ( - server.manifest.get("spec", {}).get("culling", {}).get("hibernatedSecondsThreshold", 0) - ) + hibernated_seconds_threshold = server.manifest.spec.culling.hibernatedSecondsThreshold if hibernation_date_str and hibernated_seconds_threshold > 0 and not is_user_anonymous(server): hibernation_date = datetime.fromisoformat(hibernation_date_str) @@ -421,7 +420,7 @@ def get_status(server: UserServerManifest, started: datetime) -> dict[str, dict[ } ) - max_age_threshold = server.manifest.get("spec", {}).get("culling", {}).get("maxAgeSecondsThreshold", 0) + max_age_threshold = server.manifest.spec.culling.maxAgeSecondsThreshold age = (datetime.now(UTC) - started).total_seconds() remaining_session_time = max_age_threshold - age @@ -464,7 +463,7 @@ def get_resource_requests(server: UserServerManifest) -> dict[str, Any]: def get_resource_usage( server: UserServerManifest, ) -> dict[str, Union[str, int]]: - usage = server.manifest.get("status", {}).get("mainPod", {}).get("resourceUsage", {}) + usage = server.manifest.status.get("mainPod", {}).get("resourceUsage", {}) formatted_output = {} if "cpuMillicores" in usage: formatted_output["cpu"] = usage["cpuMillicores"] / 1000 @@ -474,7 +473,8 @@ def get_resource_usage( formatted_output["storage"] = usage["disk"]["usedBytes"] return formatted_output - started = datetime.fromisoformat(re.sub(r"Z$", "+00:00", server.manifest["metadata"]["creationTimestamp"])) + assert server.manifest.metadata.creationTimestamp is not None + started = server.manifest.metadata.creationTimestamp output = { "annotations": config.session_get_endpoint_annotations.sanitize_dict( @@ -486,7 +486,7 @@ def get_resource_usage( } ), "name": server.name, - "state": {"pod_name": server.manifest["status"].get("mainPod", {}).get("name")}, + "state": {"pod_name": server.manifest.status.get("mainPod", {}).get("name")}, "started": started, "status": get_status(server, started), "url": server.url, diff --git a/components/renku_data_services/notebooks/apispec.py b/components/renku_data_services/notebooks/apispec.py index e7d17b0c6..331ad10a5 100644 --- a/components/renku_data_services/notebooks/apispec.py +++ b/components/renku_data_services/notebooks/apispec.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: api.spec.yaml -# timestamp: 2024-08-13T13:29:51+00:00 +# timestamp: 2024-09-24T09:26:37+00:00 from __future__ import annotations @@ -8,7 +8,7 @@ from enum import Enum from typing import Any, Dict, List, Optional -from pydantic import ConfigDict, Field +from pydantic import ConfigDict, Field, RootModel from renku_data_services.notebooks.apispec_base import BaseAPISpec @@ -156,7 +156,7 @@ class StringServerOptionsChoice(BaseAPISpec): class UserPodResources(BaseAPISpec): - requests: ResourceRequests + requests: Optional[ResourceRequests] = None usage: Optional[ResourceUsage] = None @@ -216,6 +216,62 @@ class FieldUserPodAnnotations(BaseAPISpec): renku_io_username: Optional[str] = Field(None, alias="renku.io/username") +class State2(Enum): + running = "running" + hibernated = "hibernated" + + +class SessionPatchRequest(BaseAPISpec): + resource_class_id: Optional[int] = None + state: Optional[State2] = None + + +class State3(Enum): + running = 
"running" + starting = "starting" + stopping = "stopping" + failed = "failed" + hibernated = "hibernated" + + +class SessionStatus(BaseAPISpec): + message: Optional[str] = None + state: State3 + will_hibernate_at: Optional[datetime] = None + will_delete_at: Optional[datetime] = None + ready_containers: int = Field(..., ge=0) + total_containers: int = Field(..., ge=0) + + +class SessionResourcesRequests(BaseAPISpec): + cpu: Optional[float] = Field(None, description="Fractional CPUs") + gpu: Optional[int] = Field(None, description="Number of GPUs used") + memory: Optional[int] = Field( + None, description="Ammount of RAM for the session, in gigabytes" + ) + storage: Optional[int] = Field( + None, description="The size of disk storage for the session, in gigabytes" + ) + + +class SessionLogsResponse(RootModel[Optional[Dict[str, str]]]): + root: Optional[Dict[str, str]] = None + + +class SessionCloudStoragePost(BaseAPISpec): + configuration: Dict[str, Any] + readonly: bool = True + source_path: str + target_path: str + storage_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + + class NotebooksImagesGetParametersQuery(BaseAPISpec): image_url: str @@ -235,10 +291,18 @@ class NotebooksServersServerNameDeleteParametersQuery(BaseAPISpec): forced: bool = False +class SessionsSessionIdLogsGetParametersQuery(BaseAPISpec): + max_lines: int = 250 + + +class SessionsImagesGetParametersQuery(BaseAPISpec): + image_url: str + + class LaunchNotebookRequest(BaseAPISpec): project_id: str launcher_id: str - image: str + image: Optional[str] = None repositories: List[LaunchNotebookRequestRepository] = [] cloudstorage: List[RCloneStorageRequest] = [] storage: int = 1 @@ -279,11 +343,21 @@ class ServerStatus(BaseAPISpec): warnings: Optional[List[ServerStatusWarning]] = None +class SessionResources(BaseAPISpec): + requests: Optional[SessionResourcesRequests] = None + + class NotebookResponse(BaseAPISpec): annotations: Optional[FieldUserPodAnnotations] = None cloudstorage: Optional[List[LaunchNotebookResponseCloudStorage]] = None image: Optional[str] = None - name: Optional[str] = None + name: Optional[str] = Field( + None, + example="d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08", + max_length=50, + min_length=5, + pattern="^[a-z]([-a-z0-9]*[a-z0-9])?$", + ) resources: Optional[UserPodResources] = None started: Optional[datetime] = None state: Optional[Dict[str, Any]] = None @@ -293,3 +367,52 @@ class NotebookResponse(BaseAPISpec): class ServersGetResponse(BaseAPISpec): servers: Optional[Dict[str, NotebookResponse]] = None + + +class SessionPostRequest(BaseAPISpec): + launcher_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + disk_storage: int = Field( + 1, description="The size of disk storage for the session, in gigabytes" + ) + resource_class_id: Optional[int] = None + cloudstorage: Optional[List[SessionCloudStoragePost]] = None + + +class SessionResponse(BaseAPISpec): + image: str + name: str = Field( + ..., + example="d185e68d-d43-renku-2-b9ac279a4e8a85ac28d08", + max_length=50, + min_length=5, + pattern="^[a-z]([-a-z0-9]*[a-z0-9])?$", + ) + resources: SessionResources + started: Optional[datetime] = Field(...) 
+ status: SessionStatus + url: str + project_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + launcher_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + resource_class_id: int + + +class SessionListResponse(RootModel[List[SessionResponse]]): + root: List[SessionResponse] diff --git a/components/renku_data_services/notebooks/blueprints.py b/components/renku_data_services/notebooks/blueprints.py index c38df8347..576cdf5c7 100644 --- a/components/renku_data_services/notebooks/blueprints.py +++ b/components/renku_data_services/notebooks/blueprints.py @@ -1,55 +1,102 @@ """Notebooks service API.""" +import base64 import json as json_lib +import logging +import os from dataclasses import dataclass from datetime import UTC, datetime -from pathlib import Path +from math import floor +from pathlib import PurePosixPath from typing import Any +from urllib.parse import urljoin, urlparse import requests from gitlab.const import Visibility as GitlabVisibility from gitlab.v4.objects.projects import Project as GitlabProject +from kubernetes.client import V1ObjectMeta, V1Secret from marshmallow import ValidationError -from sanic import Request, json +from sanic import Request, empty, json from sanic.log import logger from sanic.response import HTTPResponse, JSONResponse from sanic_ext import validate +from toml import dumps +from ulid import ULID +from yaml import safe_dump +from renku_data_services import base_models +from renku_data_services.base_api.auth import authenticate, authenticate_2 from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint +from renku_data_services.base_models import AnonymousAPIUser, APIUser, AuthenticatedAPIUser, Authenticator +from renku_data_services.crc.db import ResourcePoolRepository +from renku_data_services.data_connectors.db import DataConnectorProjectLinkRepository, DataConnectorRepository from renku_data_services.errors import errors from renku_data_services.notebooks import apispec +from renku_data_services.notebooks.api.amalthea_patches import git_proxy, init_containers from renku_data_services.notebooks.api.classes.auth import GitlabToken, RenkuTokens from renku_data_services.notebooks.api.classes.image import Image from renku_data_services.notebooks.api.classes.repository import Repository from renku_data_services.notebooks.api.classes.server import Renku1UserServer, Renku2UserServer, UserServer from renku_data_services.notebooks.api.classes.server_manifest import UserServerManifest -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser +from renku_data_services.notebooks.api.classes.user import NotebooksGitlabClient from renku_data_services.notebooks.api.schemas.cloud_storage import RCloneStorage from renku_data_services.notebooks.api.schemas.config_server_options import ServerOptionsEndpointResponse from renku_data_services.notebooks.api.schemas.logs import ServerLogs from renku_data_services.notebooks.api.schemas.secrets import K8sUserSecrets from renku_data_services.notebooks.api.schemas.server_options import ServerOptions -from renku_data_services.notebooks.api.schemas.servers_get import NotebookResponse, ServersGetResponse +from renku_data_services.notebooks.api.schemas.servers_get import ( + NotebookResponse, + ServersGetResponse, +) from renku_data_services.notebooks.api.schemas.servers_patch import 
PatchServerStatusEnum -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.notebooks.config import NotebooksConfig +from renku_data_services.notebooks.crs import ( + AmaltheaSessionSpec, + AmaltheaSessionV1Alpha1, + Authentication, + AuthenticationType, + Culling, + DataSource, + ExtraContainer, + ExtraVolume, + ExtraVolumeMount, + Ingress, + InitContainer, + Metadata, + Resources, + SecretAsVolume, + SecretAsVolumeItem, + SecretRefKey, + SecretRefWhole, + Session, + SessionEnvItem, + State, + Storage, + TlsSecret, +) from renku_data_services.notebooks.errors.intermittent import AnonymousUserPatchError, PVDisabledError from renku_data_services.notebooks.errors.programming import ProgrammingError -from renku_data_services.notebooks.errors.user import MissingResourceError, UserInputError -from renku_data_services.notebooks.util.authn import NotebooksAuthenticator, notebooks_authenticate +from renku_data_services.notebooks.errors.user import MissingResourceError from renku_data_services.notebooks.util.kubernetes_ import ( find_container, renku_1_make_server_name, renku_2_make_server_name, ) from renku_data_services.notebooks.util.repository import get_status +from renku_data_services.project.db import ProjectRepository +from renku_data_services.repositories.db import GitRepositoriesRepository +from renku_data_services.session.db import SessionRepository @dataclass(kw_only=True) class NotebooksBP(CustomBlueprint): """Handlers for manipulating notebooks.""" - authenticator: NotebooksAuthenticator - nb_config: _NotebooksConfig + authenticator: Authenticator + nb_config: NotebooksConfig + git_repo: GitRepositoriesRepository + internal_gitlab_authenticator: base_models.Authenticator + rp_repo: ResourcePoolRepository def version(self) -> BlueprintFactoryResponse: """Return notebook services version.""" @@ -82,18 +129,18 @@ async def _version(_: Request) -> JSONResponse: } return json(info) - return "/version", ["GET"], _version + return "/notebooks/version", ["GET"], _version def user_servers(self) -> BlueprintFactoryResponse: """Return a JSON of running servers for the user.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _user_servers( - request: Request, user: AnonymousUser | RegisteredUser, **query_params: dict + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, **query_params: dict ) -> JSONResponse: servers = [ UserServerManifest(s, self.nb_config.sessions.default_image) - for s in self.nb_config.k8s_client.list_servers(user.safe_username) + for s in await self.nb_config.k8s_client.list_servers(user.id) ] filter_attrs = list(filter(lambda x: x[1] is not None, request.get_query_args())) filtered_servers = {} @@ -108,36 +155,39 @@ async def _user_servers( def user_server(self) -> BlueprintFactoryResponse: """Returns a user server based on its ID.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _user_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> JSONResponse: - server = self.nb_config.k8s_client.get_server(server_name, user.safe_username) + server = await self.nb_config.k8s_client.get_server(server_name, user.id) if server is None: - raise MissingResourceError(message=f"The server {server_name} does not exist.") + raise errors.MissingResourceError(message=f"The server {server_name} does not exist.") server = 
UserServerManifest(server, self.nb_config.sessions.default_image)
             return json(NotebookResponse().dump(server))
 
         return "/notebooks/servers/<server_name>", ["GET"], _user_server
 
     def launch_notebook(self) -> BlueprintFactoryResponse:
-        """Start a renku session using the old operator in renku v2."""
+        """Start a renku session."""
 
-        @notebooks_authenticate(self.authenticator)
+        @authenticate_2(self.authenticator, self.internal_gitlab_authenticator)
         @validate(json=apispec.LaunchNotebookRequest)
         async def _launch_notebook(
-            request: Request, user: RegisteredUser | AnonymousUser, body: apispec.LaunchNotebookRequest
+            request: Request,
+            user: AnonymousAPIUser | AuthenticatedAPIUser,
+            internal_gitlab_user: APIUser,
+            body: apispec.LaunchNotebookRequest,
         ) -> JSONResponse:
             server_name = renku_2_make_server_name(
-                safe_username=user.safe_username, project_id=body.project_id, launcher_id=body.launcher_id
+                safe_username=user.id, project_id=body.project_id, launcher_id=body.launcher_id
             )
             server_class = Renku2UserServer
-            server, status_code = self.launch_notebook_helper(
+            server, status_code = await self.launch_notebook_helper(
                 nb_config=self.nb_config,
                 server_name=server_name,
                 server_class=server_class,
                 user=user,
-                image=body.image,
+                image=body.image or self.nb_config.sessions.default_image,
                 resource_class_id=body.resource_class_id,
                 storage=body.storage,
                 environment_variables=body.environment_variables,
@@ -156,24 +206,27 @@ async def _launch_notebook(
                 project_id=body.project_id,
                 launcher_id=body.launcher_id,
                 repositories=body.repositories,
+                internal_gitlab_user=internal_gitlab_user,
             )
             return json(NotebookResponse().dump(server), status_code)
 
         return "/notebooks/servers", ["POST"], _launch_notebook
 
     def launch_notebook_old(self) -> BlueprintFactoryResponse:
-        """Start a renku session using the old operator renku v1."""
+        """Start a renku session using the old operator."""
 
-        @notebooks_authenticate(self.authenticator)
+        @authenticate_2(self.authenticator, self.internal_gitlab_authenticator)
         @validate(json=apispec.LaunchNotebookRequestOld)
         async def _launch_notebook_old(
-            request: Request, user: RegisteredUser | AnonymousUser, body: apispec.LaunchNotebookRequestOld
+            request: Request,
+            user: AnonymousAPIUser | AuthenticatedAPIUser,
+            internal_gitlab_user: APIUser,
+            body: apispec.LaunchNotebookRequestOld,
         ) -> JSONResponse:
-            server_name = renku_1_make_server_name(
-                user.safe_username, body.namespace, body.project, body.branch, body.commit_sha
-            )
+            server_name = renku_1_make_server_name(user.id, body.namespace, body.project, body.branch, body.commit_sha)
             project_slug = f"{body.namespace}/{body.project}"
-            gl_project = user.get_renku_project(project_slug)
+            gitlab_client = NotebooksGitlabClient(self.nb_config.git.url, internal_gitlab_user.access_token)
+            gl_project = gitlab_client.get_renku_project(project_slug)
             if gl_project is None:
                 raise errors.MissingResourceError(message=f"Cannot find gitlab project with slug {project_slug}")
             gl_project_path = gl_project.path
@@ -188,7 +241,7 @@ async def _launch_notebook_old(
                 else None
             )
 
-            server, status_code = self.launch_notebook_helper(
+            server, status_code = await self.launch_notebook_helper(
                 nb_config=self.nb_config,
                 server_name=server_name,
                 server_class=server_class,
@@ -212,17 +265,18 @@ async def _launch_notebook_old(
                 project_id=None,
                 launcher_id=None,
                 repositories=None,
+                internal_gitlab_user=internal_gitlab_user,
             )
             return json(NotebookResponse().dump(server), status_code)
 
         return "/notebooks/old/servers", ["POST"], _launch_notebook_old
 
     @staticmethod
-    def launch_notebook_helper(
-        nb_config: _NotebooksConfig,
+    async def launch_notebook_helper(
+        nb_config: NotebooksConfig,
         server_name: str,
         server_class: type[UserServer],
-        user: AnonymousUser | RegisteredUser,
+        user: AnonymousAPIUser | AuthenticatedAPIUser,
         image: str,
         resource_class_id: int | None,
         storage: int | None,
@@ -242,9 +296,10 @@ def launch_notebook_helper(
         project_id: str | None,  # Renku 2.0
         launcher_id: str | None,  # Renku 2.0
         repositories: list[apispec.LaunchNotebookRequestRepository] | None,  # Renku 2.0
+        internal_gitlab_user: APIUser,
     ) -> tuple[UserServerManifest, int]:
         """Helper function to launch a Jupyter server."""
-        server = nb_config.k8s_client.get_server(server_name, user.safe_username)
+        server = await nb_config.k8s_client.get_server(server_name, user.id)
 
         if server:
             return UserServerManifest(
@@ -262,8 +317,12 @@ def launch_notebook_helper(
             image_repo = parsed_image.repo_api()
             image_exists_publicly = image_repo.image_exists(parsed_image)
             image_exists_privately = False
-            if not image_exists_publicly and parsed_image.hostname == nb_config.git.registry and user.git_token:
-                image_repo = image_repo.with_oauth2_token(user.git_token)
+            if (
+                not image_exists_publicly
+                and parsed_image.hostname == nb_config.git.registry
+                and internal_gitlab_user.access_token
+            ):
+                image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token)
                 image_exists_privately = image_repo.image_exists(parsed_image)
             if not image_exists_privately and not image_exists_publicly:
                 using_default_image = True
@@ -288,22 +347,24 @@ def launch_notebook_helper(
             # non-authenticated users. Also, a nice footgun from the Gitlab API Python library.
             is_image_private = getattr(gl_project, "visibility", GitlabVisibility.PUBLIC) != GitlabVisibility.PUBLIC
             image_repo = parsed_image.repo_api()
-            if is_image_private and user.git_token:
-                image_repo = image_repo.with_oauth2_token(user.git_token)
+            if is_image_private and internal_gitlab_user.access_token:
+                image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token)
             if not image_repo.image_exists(parsed_image):
-                raise MissingResourceError(
+                raise errors.MissingResourceError(
                     message=(
                         f"Cannot start the session because the image {image} does not "
                         "exist or the user does not have the permissions to access it."
) ) else: - raise UserInputError(message="Cannot determine which Docker image to use.") + raise errors.ValidationError(message="Cannot determine which Docker image to use.") parsed_server_options: ServerOptions | None = None if resource_class_id is not None: # A resource class ID was passed in, validate with CRC service - parsed_server_options = nb_config.crc_validator.validate_class_storage(user, resource_class_id, storage) + parsed_server_options = await nb_config.crc_validator.validate_class_storage( + user, resource_class_id, storage + ) elif server_options is not None: if isinstance(server_options, dict): requested_server_options = ServerOptions( @@ -322,9 +383,9 @@ def launch_notebook_helper( f"launching sessions: {type(server_options)}" ) # The old style API was used, try to find a matching class from the CRC service - parsed_server_options = nb_config.crc_validator.find_acceptable_class(user, requested_server_options) + parsed_server_options = await nb_config.crc_validator.find_acceptable_class(user, requested_server_options) if parsed_server_options is None: - raise UserInputError( + raise errors.ValidationError( message="Cannot find suitable server options based on your request and " "the available resource classes.", detail="You are receiving this error because you are using the old API for " @@ -333,15 +394,15 @@ def launch_notebook_helper( ) else: # No resource class ID specified or old-style server options, use defaults from CRC - default_resource_class = nb_config.crc_validator.get_default_class() - max_storage_gb = default_resource_class.get("max_storage", 0) + default_resource_class = await nb_config.crc_validator.get_default_class() + max_storage_gb = default_resource_class.max_storage if storage is not None and storage > max_storage_gb: - raise UserInputError( - "The requested storage amount is higher than the " + raise errors.ValidationError( + message="The requested storage amount is higher than the " f"allowable maximum for the default resource class of {max_storage_gb}GB." ) if storage is None: - storage = default_resource_class.get("default_storage") or 1 + storage = default_resource_class.default_storage parsed_server_options = ServerOptions.from_resource_class(default_resource_class) # Storage in request is in GB parsed_server_options.set_storage(storage, gigabytes=True) @@ -352,7 +413,7 @@ def launch_notebook_helper( if lfs_auto_fetch is not None: parsed_server_options.lfs_auto_fetch = lfs_auto_fetch - image_work_dir = image_repo.image_workdir(parsed_image) or Path("/") + image_work_dir = image_repo.image_workdir(parsed_image) or PurePosixPath("/") mount_path = image_work_dir / "work" server_work_dir = mount_path / gl_project_path @@ -363,23 +424,26 @@ def launch_notebook_helper( try: for cstorage in cloudstorage: storages.append( - RCloneStorage.storage_from_schema( + await RCloneStorage.storage_from_schema( cstorage.model_dump(), user=user, project_id=gl_project_id, - work_dir=server_work_dir.absolute(), + work_dir=server_work_dir, config=nb_config, + internal_gitlab_user=internal_gitlab_user, ) ) except ValidationError as e: - raise UserInputError(f"Couldn't load cloud storage config: {str(e)}") + raise errors.ValidationError(message=f"Couldn't load cloud storage config: {str(e)}") mount_points = set(s.mount_folder for s in storages if s.mount_folder and s.mount_folder != "/") if len(mount_points) != len(storages): - raise UserInputError( - "Storage mount points must be set, can't be at the root of the project and must be unique." 
+ raise errors.ValidationError( + message="Storage mount points must be set, can't be at the root of the project and must be unique." ) if any(s1.mount_folder.startswith(s2.mount_folder) for s1 in storages for s2 in storages if s1 != s2): - raise UserInputError("Cannot mount a cloud storage into the mount point of another cloud storage.") + raise errors.ValidationError( + message="Cannot mount a cloud storage into the mount point of another cloud storage." + ) repositories = repositories or [] @@ -395,6 +459,8 @@ def launch_notebook_helper( launcher_id=launcher_id, project_id=project_id, notebook=notebook, + internal_gitlab_user=internal_gitlab_user, # Renku 1 + gitlab_project=gl_project, # Renku 1 ) server = server_class( user=user, @@ -415,24 +481,24 @@ def launch_notebook_helper( ) if len(server.safe_username) > 63: - raise UserInputError( + raise errors.ValidationError( message="A username cannot be longer than 63 characters, " f"your username is {len(server.safe_username)} characters long.", detail="This can occur if your username has been changed manually or by an admin.", ) - manifest = server.start() + manifest = await server.start() if manifest is None: raise errors.ProgrammingError(message="Failed to start server.") - logger.debug(f"Server {server.server_name} has been started") + logging.debug(f"Server {server.server_name} has been started") if k8s_user_secret is not None: owner_reference = { "apiVersion": "amalthea.dev/v1alpha1", "kind": "JupyterServer", "name": server.server_name, - "uid": manifest["metadata"]["uid"], + "uid": manifest.metadata.uid, "controller": True, } request_data = { @@ -443,8 +509,8 @@ def launch_notebook_helper( } headers = {"Authorization": f"bearer {user.access_token}"} - def _on_error(server_name: str, error_msg: str) -> None: - nb_config.k8s_client.delete_server(server_name, forced=True, safe_username=user.safe_username) + async def _on_error(server_name: str, error_msg: str) -> None: + await nb_config.k8s_client.delete_server(server_name, safe_username=user.id) raise RuntimeError(error_msg) try: @@ -455,42 +521,50 @@ def _on_error(server_name: str, error_msg: str) -> None: timeout=10, ) except requests.exceptions.ConnectionError: - _on_error(server.server_name, "User secrets storage service could not be contacted {exc}") + await _on_error(server.server_name, "User secrets storage service could not be contacted {exc}") if response.status_code != 201: - _on_error(server.server_name, f"User secret could not be created {response.json()}") + await _on_error(server.server_name, f"User secret could not be created {response.json()}") return UserServerManifest(manifest, nb_config.sessions.default_image), 201 def patch_server(self) -> BlueprintFactoryResponse: """Patch a user server by name based on the query param.""" - @notebooks_authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @validate(json=apispec.PatchServerRequest) async def _patch_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str, body: apispec.PatchServerRequest + request: Request, + user: AnonymousAPIUser | AuthenticatedAPIUser, + internal_gitlab_user: APIUser, + server_name: str, + body: apispec.PatchServerRequest, ) -> JSONResponse: if not self.nb_config.sessions.storage.pvs_enabled: raise PVDisabledError() - if isinstance(user, AnonymousUser): + if isinstance(user, AnonymousAPIUser): raise AnonymousUserPatchError() patch_body = body - server = self.nb_config.k8s_client.get_server(server_name, 
user.safe_username) + server = await self.nb_config.k8s_client.get_server(server_name, user.id) if server is None: raise errors.MissingResourceError(message=f"The server with name {server_name} cannot be found") + if server.spec is None: + raise errors.ProgrammingError(message="The server manifest is absent") new_server = server - currently_hibernated = server.get("spec", {}).get("jupyterServer", {}).get("hibernated", False) - currently_failing = server.get("status", {}).get("state", "running") == "failed" + currently_hibernated = server.spec.jupyterServer.hibernated + currently_failing = server.status.get("state", "running") == "failed" state = PatchServerStatusEnum.from_api_state(body.state) if body.state is not None else None resource_class_id = patch_body.resource_class_id if server and not (currently_hibernated or currently_failing) and resource_class_id: - raise UserInputError("The resource class can be changed only if the server is hibernated or failing") + raise errors.ValidationError( + message="The resource class can be changed only if the server is hibernated or failing" + ) if resource_class_id: - parsed_server_options = self.nb_config.crc_validator.validate_class_storage( + parsed_server_options = await self.nb_config.crc_validator.validate_class_storage( user, resource_class_id, storage=None, # we do not care about validating storage @@ -517,7 +591,7 @@ async def _patch_server( "value": parsed_server_options.priority_class, } ) - elif server.get("metadata", {}).get("labels", {}).get("renku.io/quota"): + elif server.metadata.labels.get("renku.io/quota"): js_patch.append( { "op": "remove", @@ -525,8 +599,8 @@ async def _patch_server( "path": "/metadata/labels/renku.io~1quota", } ) - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=js_patch + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=js_patch ) ss_patch: list[dict[str, Any]] = [ { @@ -535,11 +609,11 @@ async def _patch_server( "value": parsed_server_options.priority_class, } ] - self.nb_config.k8s_client.patch_statefulset(server_name=server_name, patch=ss_patch) + await self.nb_config.k8s_client.patch_statefulset(server_name=server_name, patch=ss_patch) if state == PatchServerStatusEnum.Hibernated: # NOTE: Do nothing if server is already hibernated - currently_hibernated = server.get("spec", {}).get("jupyterServer", {}).get("hibernated", False) + currently_hibernated = server.spec.jupyterServer.hibernated if server and currently_hibernated: logger.warning(f"Server {server_name} is already hibernated.") @@ -549,7 +623,7 @@ async def _patch_server( hibernation: dict[str, str | bool] = {"branch": "", "commit": "", "dirty": "", "synchronized": ""} - sidecar_patch = find_container(server.get("spec", {}).get("patches", []), "git-sidecar") + sidecar_patch = find_container(server.spec.patches, "git-sidecar") status = ( get_status( server_name=server_name, @@ -587,8 +661,8 @@ async def _patch_server( }, } - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=patch + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=patch ) elif state == PatchServerStatusEnum.Running: # NOTE: We clear hibernation annotations in Amalthea to avoid flickering in the UI (showing @@ -602,35 +676,44 @@ async def _patch_server( } # NOTE: The tokens in the session could expire if the session 
is hibernated long enough, # here we inject new ones to make sure everything is valid when the session starts back up. - if user.access_token is None or user.refresh_token is None or user.git_token is None: + if user.access_token is None or user.refresh_token is None or internal_gitlab_user.access_token is None: raise errors.UnauthorizedError( message="Cannot patch the server if the user is not fully logged in." ) renku_tokens = RenkuTokens(access_token=user.access_token, refresh_token=user.refresh_token) - gitlab_token = GitlabToken(access_token=user.git_token, expires_at=user.git_token_expires_at) - self.nb_config.k8s_client.patch_tokens(server_name, renku_tokens, gitlab_token) - new_server = self.nb_config.k8s_client.patch_server( - server_name=server_name, safe_username=user.safe_username, patch=patch + gitlab_token = GitlabToken( + access_token=internal_gitlab_user.access_token, + expires_at=( + floor(user.access_token_expires_at.timestamp()) + if user.access_token_expires_at is not None + else -1 + ), + ) + await self.nb_config.k8s_client.patch_tokens(server_name, renku_tokens, gitlab_token) + new_server = await self.nb_config.k8s_client.patch_server( + server_name=server_name, safe_username=user.id, patch=patch ) + manifest = UserServerManifest(new_server, self.nb_config.sessions.default_image) + notebook_response = apispec.NotebookResponse.parse_obj(manifest) return json( - NotebookResponse().dump(UserServerManifest(new_server, self.nb_config.sessions.default_image)), 200 + notebook_response.model_dump(), + 200, ) - return "/notebooks/servers", ["POST"], _patch_server + return "/notebooks/servers/", ["PATCH"], _patch_server def stop_server(self) -> BlueprintFactoryResponse: """Stop user server by name.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _stop_server( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + _: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> HTTPResponse: - forced: bool = request.query_args.get("forced") == "true" - self.nb_config.k8s_client.delete_server(server_name, forced=forced, safe_username=user.safe_username) + await self.nb_config.k8s_client.delete_server(server_name, safe_username=user.id) return HTTPResponse(status=204) - return "/notebooks/servers", ["DELETE"], _stop_server + return "/notebooks/servers/", ["DELETE"], _stop_server def server_options(self) -> BlueprintFactoryResponse: """Return a set of configurable server options.""" @@ -652,35 +735,402 @@ async def _server_options(request: Request) -> JSONResponse: def server_logs(self) -> BlueprintFactoryResponse: """Return the logs of the running server.""" - @notebooks_authenticate(self.authenticator) + @authenticate(self.authenticator) async def _server_logs( - request: Request, user: RegisteredUser | AnonymousUser, server_name: str + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, server_name: str ) -> JSONResponse: - max_lines = int(request.query_args.get("max_lines", 250)) - logs = self.nb_config.k8s_client.get_server_logs( - server_name=server_name, - max_log_lines=max_lines, - safe_username=user.safe_username, - ) - return json(ServerLogs().dump(logs)) + args: dict[str, str | int] = request.get_args() + max_lines = int(args.get("max_lines", 250)) + try: + logs = await self.nb_config.k8s_client.get_server_logs( + server_name=server_name, + safe_username=user.id, + max_log_lines=max_lines, + ) + return json(ServerLogs().dump(logs)) + except MissingResourceError as err: + 
raise errors.MissingResourceError(message=err.message) return "/notebooks/logs/", ["GET"], _server_logs def check_docker_image(self) -> BlueprintFactoryResponse: """Return the availability of the docker image.""" - @notebooks_authenticate(self.authenticator) - async def _check_docker_image(request: Request, user: RegisteredUser | AnonymousUser) -> HTTPResponse: - image_url = request.query_args.get("image_url") + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) + async def _check_docker_image( + request: Request, user: AnonymousAPIUser | AuthenticatedAPIUser, internal_gitlab_user: APIUser + ) -> HTTPResponse: + image_url = request.get_args().get("image_url") if not isinstance(image_url, str): raise ValueError("required string of image url") parsed_image = Image.from_path(image_url) image_repo = parsed_image.repo_api() - if parsed_image.hostname == self.nb_config.git.registry and user.git_token: - image_repo = image_repo.with_oauth2_token(user.git_token) + if parsed_image.hostname == self.nb_config.git.registry and internal_gitlab_user.access_token: + image_repo = image_repo.with_oauth2_token(internal_gitlab_user.access_token) if image_repo.image_exists(parsed_image): return HTTPResponse(status=200) else: return HTTPResponse(status=404) return "/notebooks/images", ["GET"], _check_docker_image + + +@dataclass(kw_only=True) +class NotebooksNewBP(CustomBlueprint): + """Handlers for manipulating notebooks for the new Amalthea operator.""" + + authenticator: base_models.Authenticator + internal_gitlab_authenticator: base_models.Authenticator + nb_config: NotebooksConfig + project_repo: ProjectRepository + session_repo: SessionRepository + rp_repo: ResourcePoolRepository + data_connector_repo: DataConnectorRepository + data_connector_project_link_repo: DataConnectorProjectLinkRepository + + def start(self) -> BlueprintFactoryResponse: + """Start a session with the new operator.""" + + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) + @validate(json=apispec.SessionPostRequest) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + internal_gitlab_user: APIUser, + body: apispec.SessionPostRequest, + ) -> JSONResponse: + # gitlab_client = NotebooksGitlabClient(self.nb_config.git.url, internal_gitlab_user.access_token) + launcher = await self.session_repo.get_launcher(user, ULID.from_str(body.launcher_id)) + project = await self.project_repo.get_project(user=user, project_id=launcher.project_id) + server_name = renku_2_make_server_name( + safe_username=user.id, project_id=str(launcher.project_id), launcher_id=body.launcher_id + ) + existing_session = await self.nb_config.k8s_v2_client.get_server(server_name, user.id) + if existing_session is not None and existing_session.spec is not None: + return json(existing_session.as_apispec().model_dump(exclude_none=True, mode="json")) + environment = launcher.environment + image = environment.container_image + default_resource_class = await self.rp_repo.get_default_resource_class() + if default_resource_class.id is None: + raise errors.ProgrammingError(message="The default resource class has to have an ID", quiet=True) + resource_class_id = body.resource_class_id or default_resource_class.id + await self.nb_config.crc_validator.validate_class_storage(user, resource_class_id, body.disk_storage) + work_dir = environment.working_directory + # user_secrets: K8sUserSecrets | None = None + # if body.user_secrets: + # user_secrets = K8sUserSecrets( + # name=server_name, + # 
user_secret_ids=body.user_secrets.user_secret_ids, + # mount_path=body.user_secrets.mount_path, + # ) + + # TODO + data_connector_links = await self.data_connector_project_link_repo.get_links_to( + user=user, project_id=project.id + ) + data_connectors = [ + await self.data_connector_repo.get_data_connector(user=user, data_connector_id=link.data_connector_id) + for link in data_connector_links + ] + # TODO: handle secrets? + cloud_storage: dict[str, RCloneStorage] = { + str(dc.id): RCloneStorage( + source_path=dc.storage.source_path, + mount_folder=(work_dir / dc.storage.target_path).as_posix(), + configuration=dc.storage.configuration, + readonly=dc.storage.readonly, + config=self.nb_config, + name=dc.name, + ) + for dc in data_connectors + } + cloud_storage_request: dict[str, RCloneStorage] = { + s.storage_id: RCloneStorage( + source_path=s.source_path, + mount_folder=(work_dir / s.target_path).as_posix(), + configuration=s.configuration, + readonly=s.readonly, + config=self.nb_config, + name=None, + ) + for s in body.cloudstorage or [] + } + + # NOTE: Check the cloud storage in the request body and if any match + # then overwrite the project's cloud storages + # NOTE: Cloud storages in the session launch request body that are not from the DB will cause a 422 error + for csr_id, csr in cloud_storage_request.items(): + if csr_id not in cloud_storage: + raise errors.MissingResourceError( + message=f"You have requested a cloud storage with ID {csr_id} which does not exist " + "or you don't have access to.", + quiet=True, + ) + cloud_storage[csr_id] = csr + repositories = [Repository(url=i) for i in project.repositories] + secrets_to_create: list[V1Secret] = [] + # Generate the cloud storage secrets + data_sources: list[DataSource] = [] + for ics, cs in enumerate(cloud_storage.values()): + secret_name = f"{server_name}-ds-{ics}" + secrets_to_create.append(cs.secret(secret_name, self.nb_config.k8s_client.preferred_namespace)) + data_sources.append( + DataSource(mountPath=cs.mount_folder, secretRef=SecretRefWhole(name=secret_name, adopt=True)) + ) + cert_init, cert_vols = init_containers.certificates_container(self.nb_config) + session_init_containers = [InitContainer.model_validate(self.nb_config.k8s_v2_client.sanitize(cert_init))] + extra_volumes = [ + ExtraVolume.model_validate(self.nb_config.k8s_v2_client.sanitize(volume)) for volume in cert_vols + ] + if isinstance(user, AuthenticatedAPIUser): + extra_volumes.append( + ExtraVolume( + name="renku-authorized-emails", + secret=SecretAsVolume( + secretName=server_name, + items=[SecretAsVolumeItem(key="authorized_emails", path="authorized_emails")], + ), + ) + ) + git_providers = await self.nb_config.git_provider_helper.get_providers(user=user) + git_clone = await init_containers.git_clone_container_v2( + user=user, + config=self.nb_config, + repositories=repositories, + git_providers=git_providers, + workspace_mount_path=launcher.environment.mount_directory, + work_dir=launcher.environment.working_directory, + ) + if git_clone is not None: + session_init_containers.append(InitContainer.model_validate(git_clone)) + extra_containers: list[ExtraContainer] = [] + git_proxy_container = await git_proxy.main_container( + user=user, config=self.nb_config, repositories=repositories, git_providers=git_providers + ) + if git_proxy_container is not None: + extra_containers.append( + ExtraContainer.model_validate(self.nb_config.k8s_v2_client.sanitize(git_proxy_container)) + ) + + base_server_url = self.nb_config.sessions.ingress.base_url(server_name) +
base_server_path = self.nb_config.sessions.ingress.base_path(server_name) + annotations: dict[str, str] = { + "renku.io/project_id": str(launcher.project_id), + "renku.io/launcher_id": body.launcher_id, + "renku.io/resource_class_id": str(body.resource_class_id or default_resource_class.id), + } + manifest = AmaltheaSessionV1Alpha1( + metadata=Metadata(name=server_name, annotations=annotations), + spec=AmaltheaSessionSpec( + codeRepositories=[], + hibernated=False, + session=Session( + image=image, + urlPath=base_server_path, + port=environment.port, + storage=Storage( + className=self.nb_config.sessions.storage.pvs_storage_class, + size=str(body.disk_storage) + "G", + mountPath=environment.mount_directory.as_posix(), + ), + workingDir=environment.working_directory.as_posix(), + runAsUser=environment.uid, + runAsGroup=environment.gid, + resources=Resources(claims=None, requests=None, limits=None), + extraVolumeMounts=[], + command=environment.command, + args=environment.args, + shmSize="1G", + env=[ + SessionEnvItem(name="RENKU_BASE_URL_PATH", value=base_server_path), + SessionEnvItem(name="RENKU_BASE_URL", value=base_server_url), + ], + ), + ingress=Ingress( + host=self.nb_config.sessions.ingress.host, + ingressClassName=self.nb_config.sessions.ingress.annotations.get("kubernetes.io/ingress.class"), + annotations=self.nb_config.sessions.ingress.annotations, + tlsSecret=TlsSecret(adopt=False, name=self.nb_config.sessions.ingress.tls_secret) + if self.nb_config.sessions.ingress.tls_secret is not None + else None, + ), + extraContainers=extra_containers, + initContainers=session_init_containers, + extraVolumes=extra_volumes, + culling=Culling( + maxAge=f"{self.nb_config.sessions.culling.registered.max_age_seconds}s", + maxFailedDuration=f"{self.nb_config.sessions.culling.registered.failed_seconds}s", + maxHibernatedDuration=f"{self.nb_config.sessions.culling.registered.hibernated_seconds}s", + maxIdleDuration=f"{self.nb_config.sessions.culling.registered.idle_seconds}s", + maxStartingDuration=f"{self.nb_config.sessions.culling.registered.pending_seconds}s", + ), + authentication=Authentication( + enabled=True, + type=AuthenticationType.oauth2proxy + if isinstance(user, AuthenticatedAPIUser) + else AuthenticationType.token, + secretRef=SecretRefKey(name=server_name, key="auth", adopt=True), + extraVolumeMounts=[ + ExtraVolumeMount(name="renku-authorized-emails", mountPath="/authorized_emails") + ] + if isinstance(user, AuthenticatedAPIUser) + else [], + ), + dataSources=data_sources, + ), + ) + parsed_proxy_url = urlparse(urljoin(base_server_url + "/", "oauth2")) + secret_data = {} + if isinstance(user, AuthenticatedAPIUser): + secret_data["auth"] = dumps( + { + "provider": "oidc", + "client_id": self.nb_config.sessions.oidc.client_id, + "oidc_issuer_url": self.nb_config.sessions.oidc.issuer_url, + "session_cookie_minimal": True, + "skip_provider_button": True, + "redirect_url": urljoin(base_server_url + "/", "oauth2/callback"), + "cookie_path": base_server_path, + "proxy_prefix": parsed_proxy_url.path, + "authenticated_emails_file": "/authorized_emails/authorized_emails", + "client_secret": self.nb_config.sessions.oidc.client_secret, + "cookie_secret": base64.urlsafe_b64encode(os.urandom(32)).decode(), + "insecure_oidc_allow_unverified_email": self.nb_config.sessions.oidc.allow_unverified_email, + } + ) + secret_data["authorized_emails"] = user.email + else: + secret_data["auth"] = safe_dump( + { + "token": user.id, + "cookie_key": "Renku-Auth-Anon-Id", + "verbose": True, + } + ) + 
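A minimal, self-contained sketch (not part of the diff) of how the ingress helpers used above compose the session URL and the oauth2 proxy prefix. The host name, TLS flag, and server name below are made-up illustration values; base_path/base_url mirror the _SessionIngress helpers added later in this PR.

from urllib.parse import urljoin, urlparse, urlunparse

# Illustration values only; the real ones come from the notebooks config.
host = "sessions.example.org"
server_name = "renku-2-abc123"
tls_enabled = True

# Mirrors _SessionIngress.base_path / base_url from the dynamic config changes below.
base_server_path = f"/sessions/{server_name}"
scheme = "https" if tls_enabled else "http"
base_server_url = urlunparse((scheme, host, base_server_path, None, None, None))

# Same derivation the handler uses for the oauth2proxy proxy_prefix.
proxy_prefix = urlparse(urljoin(base_server_url + "/", "oauth2")).path

print(base_server_url)  # https://sessions.example.org/sessions/renku-2-abc123
print(proxy_prefix)     # /sessions/renku-2-abc123/oauth2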
secrets_to_create.append(V1Secret(metadata=V1ObjectMeta(name=server_name), string_data=secret_data)) + for s in secrets_to_create: + await self.nb_config.k8s_v2_client.create_secret(s) + try: + manifest = await self.nb_config.k8s_v2_client.create_server(manifest, user.id) + except Exception: + for s in secrets_to_create: + await self.nb_config.k8s_v2_client.delete_secret(s.metadata.name) + raise errors.ProgrammingError(message="Could not start the amalthea session") + + return json(manifest.as_apispec().model_dump(mode="json", exclude_none=True), 201) + + return "/sessions", ["POST"], _handler + + def get_all(self) -> BlueprintFactoryResponse: + """Get all sessions for a user.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser) -> HTTPResponse: + sessions = await self.nb_config.k8s_v2_client.list_servers(user.id) + output: list[dict] = [] + for session in sessions: + output.append(session.as_apispec().model_dump(exclude_none=True, mode="json")) + return json(output) + + return "/sessions", ["GET"], _handler + + def get_one(self) -> BlueprintFactoryResponse: + """Get a specific session for a user.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser, session_id: str) -> HTTPResponse: + session = await self.nb_config.k8s_v2_client.get_server(session_id, user.id) + if session is None: + raise errors.ValidationError(message=f"The session with ID {session_id} does not exist.", quiet=True) + return json(session.as_apispec().model_dump(exclude_none=True, mode="json")) + + return "/sessions/", ["GET"], _handler + + def delete(self) -> BlueprintFactoryResponse: + """Fully delete a session with the new operator.""" + + @authenticate(self.authenticator) + async def _handler(_: Request, user: AuthenticatedAPIUser | AnonymousAPIUser, session_id: str) -> HTTPResponse: + await self.nb_config.k8s_v2_client.delete_server(session_id, user.id) + return empty() + + return "/sessions/", ["DELETE"], _handler + + def patch(self) -> BlueprintFactoryResponse: + """Patch a session.""" + + @authenticate(self.authenticator) + @validate(json=apispec.SessionPatchRequest) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + session_id: str, + body: apispec.SessionPatchRequest, + ) -> HTTPResponse: + session = await self.nb_config.k8s_v2_client.get_server(session_id, user.id) + if session is None: + raise errors.MissingResourceError( + message=f"The session with ID {session_id} does not exist", quiet=True + ) + # TODO: Some patching should only be done when the session is in certain states to avoid inadvertent restarts + patches: dict[str, Any] = {} + if body.resource_class_id is not None: + rcs = await self.rp_repo.get_classes(user, id=body.resource_class_id) + if len(rcs) == 0: + raise errors.MissingResourceError( + message=f"The resource class you requested with ID {body.resource_class_id} does not exist", + quiet=True, + ) + rc = rcs[0] + patches |= dict( + spec=dict( + session=dict( + resources=dict(requests=dict(cpu=f"{round(rc.cpu * 1000)}m", memory=f"{rc.memory}Gi")) + ) + ) + ) + # TODO: Add a config to specify the gpu kind, there is also a GpuKind enum in resource_pools + patches["spec"]["session"]["resources"]["requests"]["nvidia.com/gpu"] = rc.gpu + # NOTE: K8s fails if the gpus limit is not equal to the requests because it cannot be overcommitted + patches["spec"]["session"]["resources"]["limits"] = {"nvidia.com/gpu": rc.gpu} + if ( + body.state
is not None + and body.state.value.lower() == State.Hibernated.value.lower() + and body.state.value.lower() != session.status.state.value.lower() + ): + if "spec" not in patches: + patches["spec"] = {} + patches["spec"]["hibernated"] = True + elif ( + body.state is not None + and body.state.value.lower() == State.Running.value.lower() + and session.status.state.value.lower() != body.state.value.lower() + ): + if "spec" not in patches: + patches["spec"] = {} + patches["spec"]["hibernated"] = False + + if len(patches) > 0: + new_session = await self.nb_config.k8s_v2_client.patch_server(session_id, user.id, patches) + else: + new_session = session + + return json(new_session.as_apispec().model_dump(exclude_none=True, mode="json")) + + return "/sessions/", ["PATCH"], _handler + + def logs(self) -> BlueprintFactoryResponse: + """Get logs from the session.""" + + @authenticate(self.authenticator) + @validate(query=apispec.SessionsSessionIdLogsGetParametersQuery) + async def _handler( + _: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + session_id: str, + query: apispec.SessionsSessionIdLogsGetParametersQuery, + ) -> HTTPResponse: + logs = await self.nb_config.k8s_v2_client.get_server_logs(session_id, user.id, query.max_lines) + return json(apispec.SessionLogsResponse.model_validate(logs).model_dump(exclude_none=True)) + + return "/sessions//logs", ["GET"], _handler diff --git a/components/renku_data_services/notebooks/config/__init__.py b/components/renku_data_services/notebooks/config/__init__.py index 5d368f00c..bfb363bf9 100644 --- a/components/renku_data_services/notebooks/config/__init__.py +++ b/components/renku_data_services/notebooks/config/__init__.py @@ -4,6 +4,12 @@ from dataclasses import dataclass, field from typing import Any, Optional, Protocol, Self +from renku_data_services.base_models import APIUser +from renku_data_services.crc.db import ResourcePoolRepository +from renku_data_services.crc.models import ResourceClass +from renku_data_services.db_config.config import DBConfig +from renku_data_services.k8s.clients import K8sCoreClient, K8sSchedulingClient +from renku_data_services.k8s.quota import QuotaRepository from renku_data_services.notebooks.api.classes.data_service import ( CloudStorageConfig, CRCValidator, @@ -13,12 +19,18 @@ GitProviderHelper, StorageValidator, ) -from renku_data_services.notebooks.api.classes.k8s_client import JsServerCache, K8sClient, NamespacedK8sClient +from renku_data_services.notebooks.api.classes.k8s_client import ( + AmaltheaSessionV1Alpha1Kr8s, + JupyterServerV1Alpha1Kr8s, + K8sClient, + NamespacedK8sClient, + ServerCache, +) from renku_data_services.notebooks.api.classes.repository import GitProvider -from renku_data_services.notebooks.api.classes.user import User from renku_data_services.notebooks.api.schemas.server_options import ServerOptions from renku_data_services.notebooks.config.dynamic import ( _AmaltheaConfig, + _AmaltheaV2Config, _CloudStorage, _GitConfig, _K8sConfig, @@ -29,25 +41,28 @@ _UserSecrets, ) from renku_data_services.notebooks.config.static import _ServersGetEndpointAnnotations +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1, JupyterServerV1Alpha1 class CRCValidatorProto(Protocol): """Compute resource control validator.""" - def validate_class_storage( + async def validate_class_storage( self, - user: User, + user: APIUser, class_id: int, storage: Optional[int] = None, ) -> ServerOptions: """Validate the resource class storage for the session.""" ... 
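A short, generic sketch of the structural-typing pattern the async CRCValidatorProto above relies on; the simplified types here (a plain str user and an int return value) are stand-ins for APIUser and ServerOptions, not the real signatures.

from typing import Optional, Protocol


class ValidatorProto(Protocol):
    """Simplified stand-in for the async validator protocol."""

    async def validate_class_storage(self, user: str, class_id: int, storage: Optional[int] = None) -> int: ...


class DummyValidator:
    # No inheritance from the Protocol is needed; a matching method signature is enough.
    async def validate_class_storage(self, user: str, class_id: int, storage: Optional[int] = None) -> int:
        return storage if storage is not None else 0


async def check_storage(validator: ValidatorProto) -> int:
    # Callers simply await the protocol method, as the notebook handlers do with crc_validator.
    return await validator.validate_class_storage("user-1", class_id=3, storage=10)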
- def get_default_class(self) -> dict[str, Any]: + async def get_default_class(self) -> ResourceClass: """Get the default resource class.""" ... - def find_acceptable_class(self, user: User, requested_server_options: ServerOptions) -> Optional[ServerOptions]: + async def find_acceptable_class( + self, user: APIUser, requested_server_options: ServerOptions + ) -> Optional[ServerOptions]: """Find a suitable resource class based on resource requirements.""" ... @@ -55,15 +70,17 @@ def find_acceptable_class(self, user: User, requested_server_options: ServerOpti class StorageValidatorProto(Protocol): """Cloud storage validator protocol.""" - def get_storage_by_id(self, user: User, project_id: int, storage_id: str) -> CloudStorageConfig: + async def get_storage_by_id( + self, user: APIUser, internal_gitlab_user: APIUser, project_id: int, storage_id: str + ) -> CloudStorageConfig: """Get storage by ID.""" ... - def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: + async def validate_storage_configuration(self, configuration: dict[str, Any], source_path: str) -> None: """Validate a storage configuration.""" ... - def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: + async def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> dict[str, Any]: """Obscure passsword fields in storage credentials.""" ... @@ -71,13 +88,15 @@ def obscure_password_fields_for_storage(self, configuration: dict[str, Any]) -> class GitProviderHelperProto(Protocol): """Git provider protocol.""" - def get_providers(self, user: User) -> list[GitProvider]: + async def get_providers(self, user: APIUser) -> list[GitProvider]: """Get a list of git providers.""" ... @dataclass -class _NotebooksConfig: +class NotebooksConfig: + """The notebooks configuration.""" + server_options: _ServerOptionsConfig sessions: _SessionConfig amalthea: _AmaltheaConfig @@ -89,7 +108,8 @@ class _NotebooksConfig: crc_validator: CRCValidatorProto storage_validator: StorageValidatorProto git_provider_helper: GitProviderHelperProto - k8s_client: K8sClient + k8s_client: K8sClient[JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s] + k8s_v2_client: K8sClient[AmaltheaSessionV1Alpha1, AmaltheaSessionV1Alpha1Kr8s] current_resource_schema_version: int = 1 anonymous_sessions_enabled: bool = False ssh_enabled: bool = False @@ -103,54 +123,65 @@ class _NotebooksConfig: ) @classmethod - def from_env(cls) -> Self: - dummy_stores = _parse_str_as_bool(os.environ.get("NB_DUMMY_STORES", False)) - sessions_config = _SessionConfig.from_env() - git_config = _GitConfig.from_env() - data_service_url = os.environ["NB_DATA_SERVICE_URL"] + def from_env(cls, db_config: DBConfig) -> Self: + """Create a configuration object from environment variables.""" + dummy_stores = _parse_str_as_bool(os.environ.get("DUMMY_STORES", False)) + sessions_config: _SessionConfig + git_config: _GitConfig + data_service_url = os.environ.get("NB_DATA_SERVICE_URL", "http://127.0.0.1:8000") server_options = _ServerOptionsConfig.from_env() crc_validator: CRCValidatorProto storage_validator: StorageValidatorProto git_provider_helper: GitProviderHelperProto + k8s_namespace = os.environ.get("K8S_NAMESPACE", "default") + quota_repo: QuotaRepository if dummy_stores: crc_validator = DummyCRCValidator() + sessions_config = _SessionConfig._for_testing() storage_validator = DummyStorageValidator() git_provider_helper = DummyGitProviderHelper() + amalthea_config = 
_AmaltheaConfig(cache_url="http://not.specified") + amalthea_v2_config = _AmaltheaV2Config(cache_url="http://not.specified") + git_config = _GitConfig("http://not.specified", "registry.not.specified") else: - crc_validator = CRCValidator(data_service_url) + quota_repo = QuotaRepository(K8sCoreClient(), K8sSchedulingClient(), namespace=k8s_namespace) + rp_repo = ResourcePoolRepository(db_config.async_session_maker, quota_repo) + crc_validator = CRCValidator(rp_repo) + sessions_config = _SessionConfig.from_env() storage_validator = StorageValidator(data_service_url) - git_provider_helper = GitProviderHelper(data_service_url, sessions_config.ingress.host, git_config.url) + amalthea_config = _AmaltheaConfig.from_env() + amalthea_v2_config = _AmaltheaV2Config.from_env() + git_config = _GitConfig.from_env() + git_provider_helper = GitProviderHelper( + data_service_url, f"http://{sessions_config.ingress.host}", git_config.url + ) k8s_config = _K8sConfig.from_env() - amalthea_config = _AmaltheaConfig.from_env() renku_ns_client = NamespacedK8sClient( - k8s_config.renku_namespace, - amalthea_config.group, - amalthea_config.version, - amalthea_config.plural, + k8s_config.renku_namespace, JupyterServerV1Alpha1, JupyterServerV1Alpha1Kr8s ) - session_ns_client = None - if k8s_config.sessions_namespace: - session_ns_client = NamespacedK8sClient( - k8s_config.sessions_namespace, - amalthea_config.group, - amalthea_config.version, - amalthea_config.plural, - ) - js_cache = JsServerCache(amalthea_config.cache_url) + js_cache = ServerCache(amalthea_config.cache_url, JupyterServerV1Alpha1) k8s_client = K8sClient( - js_cache=js_cache, + cache=js_cache, renku_ns_client=renku_ns_client, - session_ns_client=session_ns_client, + username_label="renku.io/safe-username", + ) + v2_cache = ServerCache(amalthea_v2_config.cache_url, AmaltheaSessionV1Alpha1) + renku_ns_v2_client = NamespacedK8sClient( + k8s_config.renku_namespace, AmaltheaSessionV1Alpha1, AmaltheaSessionV1Alpha1Kr8s + ) + k8s_v2_client = K8sClient( + cache=v2_cache, + renku_ns_client=renku_ns_v2_client, username_label="renku.io/safe-username", ) return cls( server_options=server_options, - sessions=_SessionConfig.from_env(), - amalthea=_AmaltheaConfig.from_env(), + sessions=sessions_config, + amalthea=amalthea_config, sentry=_SentryConfig.from_env(), - git=_GitConfig.from_env(), - k8s=_K8sConfig.from_env(), + git=git_config, + k8s=k8s_config, cloud_storage=_CloudStorage.from_env(), user_secrets=_UserSecrets.from_env(), current_resource_schema_version=1, @@ -164,4 +195,5 @@ def from_env(cls) -> Self: storage_validator=storage_validator, git_provider_helper=git_provider_helper, k8s_client=k8s_client, + k8s_v2_client=k8s_v2_client, ) diff --git a/components/renku_data_services/notebooks/config/dynamic.py b/components/renku_data_services/notebooks/config/dynamic.py index e08eca830..0e02ab29e 100644 --- a/components/renku_data_services/notebooks/config/dynamic.py +++ b/components/renku_data_services/notebooks/config/dynamic.py @@ -6,11 +6,14 @@ from enum import Enum from io import StringIO from typing import Any, ClassVar, Optional, Self, Union +from urllib.parse import urlunparse import yaml from ..api.schemas.config_server_options import ServerOptionsChoices, ServerOptionsDefaults +latest_version: str = "1.25.3" + def _parse_str_as_bool(val: Union[str, bool]) -> bool: if isinstance(val, str): @@ -97,11 +100,11 @@ def from_env(cls) -> Self: @dataclass class _GitProxyConfig: - sentry: _SentryConfig renku_client_secret: str = field(repr=False) + sentry: 
_SentryConfig = field(default_factory=_SentryConfig.from_env) port: int = 8080 health_port: int = 8081 - image: str = "renku/git-https-proxy:latest" + image: str = f"renku/git-https-proxy:{latest_version}" renku_client_id: str = "renku" @classmethod @@ -112,16 +115,16 @@ def from_env(cls) -> Self: sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_PROXY__"), port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_PROXY__PORT", 8080)), health_port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_PROXY__HEALTH_PORT", 8081)), - image=os.environ.get("NB_SESSIONS__GIT_PROXY__IMAGE", "renku/git-https-proxy:latest"), + image=os.environ.get("NB_SESSIONS__GIT_PROXY__IMAGE", f"renku/git-https-proxy:{latest_version}"), ) @dataclass class _GitRpcServerConfig: - sentry: _SentryConfig + sentry: _SentryConfig = field(default_factory=_SentryConfig.from_env) host: str = "0.0.0.0" # nosec B104 port: int = 4000 - image: str = "renku/git-rpc-server:latest" + image: str = f"renku/git-rpc-server:{latest_version}" def __post_init__(self) -> None: self.port = _parse_value_as_int(self.port) @@ -129,7 +132,7 @@ def __post_init__(self) -> None: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__IMAGE", "renku/git-rpc-server:latest"), + image=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__IMAGE", f"renku/git-rpc-server:{latest_version}"), host=os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__HOST", "0.0.0.0"), # nosec B104 port=_parse_value_as_int(os.environ.get("NB_SESSIONS__GIT_RPC_SERVER__PORT", 4000)), sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_RPC_SERVER__"), @@ -138,13 +141,13 @@ def from_env(cls) -> Self: @dataclass class _GitCloneConfig: - image: str = "renku/git-clone:latest" + image: str = f"renku/git-clone:{latest_version}" sentry: _SentryConfig = field(default_factory=lambda: _SentryConfig(enabled=False)) @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__GIT_CLONE__IMAGE", "renku/git-rpc-server:latest"), + image=os.environ.get("NB_SESSIONS__GIT_CLONE__IMAGE", f"renku/git-clone:{latest_version}"), sentry=_SentryConfig.from_env(prefix="NB_SESSIONS__GIT_CLONE__"), ) @@ -171,9 +174,9 @@ class _SessionOidcConfig: client_secret: str = field(repr=False) token_url: str auth_url: str + issuer_url: str client_id: str = "renku-jupyterserver" allow_unverified_email: Union[str, bool] = False - config_url: str = "/auth/realms/Renku/.well-known/openid-configuration" def __post_init__(self) -> None: self.allow_unverified_email = _parse_str_as_bool(self.allow_unverified_email) @@ -188,9 +191,7 @@ def from_env(cls) -> Self: os.environ.get("NB_SESSIONS__OIDC__ALLOW_UNVERIFIED_EMAIL", False) ), client_id=os.environ.get("NB_SESSIONS__OIDC__CLIENT_ID", "renku-jupyterserver"), - config_url=os.environ.get( - "NB_SESSIONS__OIDC__CONFIG_URL", "/auth/realms/Renku/.well-known/openid-configuration" - ), + issuer_url=os.environ["NB_SESSIONS__OIDC__ISSUER_URL"], ) @@ -203,7 +204,7 @@ class _CustomCaCertsConfig: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_SESSIONS__CA_CERTS__IMAGE", "renku-jupyterserver"), + image=os.environ.get("NB_SESSIONS__CA_CERTS__IMAGE", "renku/certificates:0.0.2"), path=os.environ.get("NB_SESSIONS__CA_CERTS__PATH", "/auth/realms/Renku/.well-known/openid-configuration"), secrets=yaml.safe_load(StringIO(os.environ.get("NB_SESSIONS__CA_CERTS__SECRETS", "[]"))), ) @@ -226,6 +227,23 @@ def from_env(cls) -> Self: ) +@dataclass +class _AmaltheaV2Config: + cache_url: str + group: 
str = "amalthea.dev" + version: str = "v1alpha1" + plural: str = "amaltheasessions" + + @classmethod + def from_env(cls) -> Self: + return cls( + cache_url=os.environ["NB_AMALTHEA_V2__CACHE_URL"], + group=os.environ.get("NB_AMALTHEA_V2__GROUP", "amalthea.dev"), + version=os.environ.get("NB_AMALTHEA_V2__VERSION", "v1alpha1"), + plural=os.environ.get("NB_AMALTHEA_V2__PLURAL", "amaltheasessions"), + ) + + @dataclass class _SessionIngress: host: str @@ -240,6 +258,13 @@ def from_env(cls) -> Self: annotations=yaml.safe_load(StringIO(os.environ.get("NB_SESSIONS__INGRESS__ANNOTATIONS", "{}"))), ) + def base_path(self, server_name: str) -> str: + return f"/sessions/{server_name}" + + def base_url(self, server_name: str) -> str: + scheme = "https" if self.tls_secret else "http" + return urlunparse((scheme, self.host, self.base_path(server_name), None, None, None)) + @dataclass class _GenericCullingConfig: @@ -389,20 +414,44 @@ def from_env(cls) -> Self: tolerations=yaml.safe_load(StringIO(os.environ.get("", "[]"))), ) + @classmethod + def _for_testing(cls) -> Self: + return cls( + culling=_SessionCullingConfig.from_env(), + git_proxy=_GitProxyConfig(renku_client_secret="not-defined"), # nosec B106 + git_rpc_server=_GitRpcServerConfig.from_env(), + git_clone=_GitCloneConfig.from_env(), + ingress=_SessionIngress(host="localhost"), + ca_certs=_CustomCaCertsConfig.from_env(), + oidc=_SessionOidcConfig( + client_id="not-defined", + client_secret="not-defined", # nosec B106 + token_url="http://not.defined", + auth_url="http://not.defined", + issuer_url="http://not.defined", + ), + storage=_SessionStorageConfig.from_env(), + containers=_SessionContainers.from_env(), + ssh=_SessionSshConfig.from_env(), + default_image=os.environ.get("", "renku/singleuser:latest"), + enforce_cpu_limits=CPUEnforcement(os.environ.get("", "off")), + termination_warning_duration_seconds=_parse_value_as_int(os.environ.get("", 12 * 60 * 60)), + image_default_workdir="/home/jovyan", + node_selector=yaml.safe_load(StringIO(os.environ.get("", "{}"))), + affinity=yaml.safe_load(StringIO(os.environ.get("", "{}"))), + tolerations=yaml.safe_load(StringIO(os.environ.get("", "[]"))), + ) + @dataclass class _K8sConfig: """Defines the k8s client and namespace.""" - renku_namespace: str - sessions_namespace: Optional[str] = None + renku_namespace: str = "default" @classmethod def from_env(cls) -> Self: - return cls( - renku_namespace=os.environ["KUBERNETES_NAMESPACE"], - sessions_namespace=os.environ.get("SESSIONS_NAMESPACE"), - ) + return cls(renku_namespace=os.environ.get("KUBERNETES_NAMESPACE", "default")) @dataclass @@ -449,7 +498,7 @@ def from_env(cls) -> Self: @dataclass class _UserSecrets: - image: str = "renku/secrets_mount:latest" + image: str = f"renku/secrets_mount:{latest_version}" secrets_storage_service_url: str = "http://renku-secrets-storage" def __post_init__(self) -> None: @@ -458,7 +507,7 @@ def __post_init__(self) -> None: @classmethod def from_env(cls) -> Self: return cls( - image=os.environ.get("NB_USER_SECRETS__IMAGE", "renku/secrets_mount:latest"), + image=os.environ.get("NB_USER_SECRETS__IMAGE", f"renku/secrets_mount:{latest_version}"), secrets_storage_service_url=os.environ.get( "NB_USER_SECRETS__SECRETS_STORAGE_SERVICE_URL", "http://renku-secrets-storage" ), diff --git a/components/renku_data_services/notebooks/config/static.py b/components/renku_data_services/notebooks/config/static.py index e31670f86..956401387 100644 --- a/components/renku_data_services/notebooks/config/static.py +++ 
b/components/renku_data_services/notebooks/config/static.py @@ -88,7 +88,7 @@ def __post_init__(self) -> None: annotation.get_field_name(sanitized=True): annotation.to_marshmallow_field() for annotation in self.annotations } - )(uknown=INCLUDE) + )(unknown=INCLUDE) def sanitize_dict(self, ann_dict: dict[str, str]) -> dict[str, str]: return cast(dict[str, str], self.schema().load(ann_dict)) diff --git a/components/renku_data_services/notebooks/cr_amalthea_session.py b/components/renku_data_services/notebooks/cr_amalthea_session.py new file mode 100644 index 000000000..a4c2e3fd9 --- /dev/null +++ b/components/renku_data_services/notebooks/cr_amalthea_session.py @@ -0,0 +1,2884 @@ +# generated by datamodel-codegen: +# filename: +# timestamp: 2024-09-04T21:22:45+00:00 + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Any, Dict, List, Optional, Union + +from pydantic import ConfigDict, Field +from renku_data_services.notebooks.cr_base import BaseCRD + + +class ExtraVolumeMount(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + mountPath: str = Field( + ..., + description="Path within the container at which the volume should be mounted. Must\nnot contain ':'.", + ) + mountPropagation: Optional[str] = Field( + default=None, + description="mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.", + ) + name: str = Field(..., description="This must match the Name of a Volume.") + readOnly: Optional[bool] = Field( + default=None, + description="Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.", + ) + subPath: Optional[str] = Field( + default=None, + description="Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).", + ) + subPathExpr: Optional[str] = Field( + default=None, + description="Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.", + ) + + +class SecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class Type(Enum): + token = "token" + oauth2proxy = "oauth2proxy" + + +class Authentication(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = True + extraVolumeMounts: Optional[List[ExtraVolumeMount]] = Field( + default=None, + description="Additional volume mounts for the authentication container.", + ) + secretRef: SecretRef = Field( + ..., + description="Kubernetes secret that contains the authentication configuration\nFor `token` a yaml file with the following keys is required:\n - token: the token value used to authenticate the user\n - cookie_key: the name of the cookie where the token will be saved and searched for\nFor `oauth2proxy` please see https://oauth2-proxy.github.io/oauth2-proxy/configuration/overview#config-file.\nNote that the `upstream` and `http_address` configuration options cannot be set from the secret because\nthe operator knows how to set these options to the proper values.", + ) + 
type: Type + + +class CloningConfigSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class ConfigSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + key: str + name: str + + +class Type1(Enum): + git = "git" + + +class CodeRepository(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + clonePath: str = Field( + default=".", + description="Path relative to the session working directory where the repository should be cloned into.", + example="repositories/project1", + ) + cloningConfigSecretRef: Optional[CloningConfigSecretRef] = Field( + default=None, + description="The Kubernetes secret that contains the code repository configuration to be used during cloning.\nFor 'git' this should contain either:\nThe username and password\nThe private key and its corresponding password\nAn empty value can be used when cloning from public repositories using the http protocol\nNOTE: you have to specify the whole config in a single key in the secret.", + ) + configSecretRef: Optional[ConfigSecretRef] = Field( + default=None, + description="The Kubernetes secret that contains the code repository configuration to be used when the session is running.\nFor 'git' this is the git configuration which can be used to inject credentials in addition to any other repo-specific Git configuration.\nNOTE: you have to specify the whole config in a single key in the secret.", + ) + remote: str = Field( + ..., + description="The HTTP url to the code repository", + example="https://github.com/SwissDataScienceCenter/renku", + ) + revision: Optional[str] = Field( + default=None, + description="The tag, branch or commit SHA to checkout, if omitted then will be the tip of the default branch of the repo", + example="main", + ) + type: Type1 = Field( + default="git", + description="The type of the code repository - currently the only supported kind is git.", + ) + + +class Culling(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + maxAge: Optional[str] = Field( + default=None, + description='The maximum allowed age for a session, regardless of whether it\nis active or not. When the threshold is reached the session is hibernated.\nA value of zero indicates that Amalthea will not automatically hibernate\nthe session based on its age.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxFailedDuration: Optional[str] = Field( + default=None, + description='How long can a server be in failed state before it gets hibernated. A\nvalue of zero indicates that the server will not be automatically\nhibernated by Amalthea if it is failing.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxHibernatedDuration: Optional[str] = Field( + default=None, + description='How long can a session be in hibernated state before\nit gets completely deleted. 
A value of zero indicates that hibernated servers\nwill not be automatically be deleted by Amalthea after a period of time.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxIdleDuration: Optional[str] = Field( + default=None, + description='How long should a server be idle for before it is hibernated. A value of\nzero indicates that Amalthea will not automatically hibernate inactive sessions.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + maxStartingDuration: Optional[str] = Field( + default=None, + description='How long can a server be in starting state before it gets hibernated. A\nvalue of zero indicates that the server will not be automatically hibernated\nby Amalthea because it took to long to start.\nGolang\'s time.ParseDuration is used to parse this, so values like 2h5min will work,\nvalid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".', + ) + + +class SecretRef1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + name: str + + +class Type2(Enum): + rclone = "rclone" + + +class DataSource(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + accessMode: str = Field( + default="ReadOnlyMany", description="The access mode for the data source" + ) + mountPath: str = Field( + default="data", + description="Path relative to the session working directory where the data should be mounted", + example="data/storages", + ) + secretRef: Optional[SecretRef1] = Field( + default=None, + description="The secret containing the configuration or credentials needed for access to the data.\nThe format of the configuration that is expected depends on the storage type.\nNOTE: define all values in a single key of the Kubernetes secret.\nrclone: any valid rclone configuration for a single remote, see the output of `rclone config providers` for validation and format.", + ) + type: Type2 = Field(default="rclone", description="The data source type") + + +class ConfigMapKeyRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="The key to select.") + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="Specify whether the ConfigMap or its key must be defined", + ) + + +class FieldRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiVersion: Optional[str] = Field( + default=None, + description='Version of the schema the FieldPath is written in terms of, defaults to "v1".', + ) + fieldPath: str = Field( + ..., description="Path of the field to select in the specified API version." 
+ ) + + +class ResourceFieldRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + containerName: Optional[str] = Field( + default=None, + description="Container name: required for volumes, optional for env vars", + ) + divisor: Optional[Union[int, str]] = Field( + default=None, + description='Specifies the output format of the exposed resources, defaults to "1"', + ) + resource: str = Field(..., description="Required: resource to select") + + +class SecretKeyRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field( + ..., + description="The key of the secret to select from. Must be a valid secret key.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="Specify whether the Secret or its key must be defined", + ) + + +class ValueFrom(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class ConfigMapRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. 
apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the ConfigMap must be defined" + ) + + +class SecretRef2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the Secret must be defined" + ) + + +class EnvFromItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapRef: Optional[ConfigMapRef] = Field( + default=None, description="The ConfigMap to select from" + ) + prefix: Optional[str] = Field( + default=None, + description="An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + ) + secretRef: Optional[SecretRef2] = Field( + default=None, description="The Secret to select from" + ) + + +class Exec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + command: Optional[List[str]] = Field( + default=None, + description="Command is the command line to execute inside the container, the working directory for the\ncommand is root ('/') in the container's filesystem. The command is simply exec'd, it is\nnot run inside a shell, so traditional shell instructions ('|', etc) won't work. To use\na shell, you need to explicitly call out to that shell.\nExit status of 0 is treated as live/healthy and non-zero is unhealthy.", + ) + + +class HttpHeader(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., + description="The header field name.\nThis will be canonicalized upon output, so case-variant names will be understood as the same header.", + ) + value: str = Field(..., description="The header field value") + + +class HttpGet(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class TcpSocket(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description="Optional: Host name to connect to, defaults to the pod IP.", + ) + port: Union[int, str] = Field( + ..., + description="Number or name of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + + +class PostStart(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class HttpGet1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PreStop(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet1] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class Lifecycle(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + postStart: Optional[PostStart] = Field( + default=None, + description="PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + preStop: Optional[PreStop] = Field( + default=None, + description="PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + + +class Grpc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + port: int = Field( + ..., + description="Port number of the gRPC service. 
Number must be in the range 1 to 65535.", + ) + service: Optional[str] = Field( + default=None, + description="Service is the name of the service to place in the gRPC HealthCheckRequest\n(see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\n\nIf this is not specified, the default behavior is defined by gRPC.", + ) + + +class HttpGet2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class LivenessProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet2] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class Port(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + containerPort: int = Field( + ..., + description="Number of port to expose on the pod's IP address.\nThis must be a valid port number, 0 < x < 65536.", + ) + hostIP: Optional[str] = Field( + default=None, description="What host IP to bind the external port to." + ) + hostPort: Optional[int] = Field( + default=None, + description="Number of port to expose on the host.\nIf specified, this must be a valid port number, 0 < x < 65536.\nIf HostNetwork is specified, this must match ContainerPort.\nMost containers do not need this.", + ) + name: Optional[str] = Field( + default=None, + description="If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.", + ) + protocol: str = Field( + default="TCP", + description='Protocol for port. Must be UDP, TCP, or SCTP.\nDefaults to "TCP".', + ) + + +class HttpGet3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class ReadinessProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet3] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. 
Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class ResizePolicyItem(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + resourceName: str = Field( + ..., + description="Name of the resource to which this resource resize policy applies.\nSupported values: cpu, memory.", + ) + restartPolicy: str = Field( + ..., + description="Restart policy to apply when specified resource is resized.\nIf not specified, it defaults to NotRequired.", + ) + + +class Claim(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., + description="Name must match the name of one entry in pod.spec.resourceClaims of\nthe Pod where this field is used. It makes that resource available\ninside a container.", + ) + + +class Resources(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. 
Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class Capabilities(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + add: Optional[List[str]] = Field(default=None, description="Added capabilities") + drop: Optional[List[str]] = Field(default=None, description="Removed capabilities") + + +class SeLinuxOptions(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + level: Optional[str] = Field( + default=None, + description="Level is SELinux level label that applies to the container.", + ) + role: Optional[str] = Field( + default=None, + description="Role is a SELinux role label that applies to the container.", + ) + type: Optional[str] = Field( + default=None, + description="Type is a SELinux type label that applies to the container.", + ) + user: Optional[str] = Field( + default=None, + description="User is a SELinux user label that applies to the container.", + ) + + +class SeccompProfile(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + localhostProfile: Optional[str] = Field( + default=None, + description='localhostProfile indicates a profile defined in a file on the node should be used.\nThe profile must be preconfigured on the node to work.\nMust be a descending path, relative to the kubelet\'s configured seccomp profile location.\nMust be set if type is "Localhost". Must NOT be set for any other type.', + ) + type: str = Field( + ..., + description="type indicates which kind of seccomp profile will be applied.\nValid options are:\n\n\nLocalhost - a profile defined in a file on the node should be used.\nRuntimeDefault - the container runtime default profile should be used.\nUnconfined - no profile should be applied.", + ) + + +class WindowsOptions(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + gmsaCredentialSpec: Optional[str] = Field( + default=None, + description="GMSACredentialSpec is where the GMSA admission webhook\n(https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the\nGMSA credential spec named by the GMSACredentialSpecName field.", + ) + gmsaCredentialSpecName: Optional[str] = Field( + default=None, + description="GMSACredentialSpecName is the name of the GMSA credential spec to use.", + ) + hostProcess: Optional[bool] = Field( + default=None, + description="HostProcess determines if a container should be run as a 'Host Process' container.\nAll of a Pod's containers must have the same effective HostProcess value\n(it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).\nIn addition, if HostProcess is true then HostNetwork must also be set to true.", + ) + runAsUserName: Optional[str] = Field( + default=None, + description="The UserName in Windows to run the entrypoint of the container process.\nDefaults to the user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + + +class SecurityContext(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + allowPrivilegeEscalation: Optional[bool] = Field( + default=None, + description="AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. 
This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.", + ) + capabilities: Optional[Capabilities] = Field( + default=None, + description="The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows.", + ) + privileged: Optional[bool] = Field( + default=None, + description="Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + procMount: Optional[str] = Field( + default=None, + description="procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows.", + ) + readOnlyRootFilesystem: Optional[bool] = Field( + default=None, + description="Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsGroup: Optional[int] = Field( + default=None, + description="The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsNonRoot: Optional[bool] = Field( + default=None, + description="Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + runAsUser: Optional[int] = Field( + default=None, + description="The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seLinuxOptions: Optional[SeLinuxOptions] = Field( + default=None, + description="The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seccompProfile: Optional[SeccompProfile] = Field( + default=None, + description="The seccomp options to use by this container. 
If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows.", + ) + windowsOptions: Optional[WindowsOptions] = Field( + default=None, + description="The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.", + ) + + +class HttpGet4(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class StartupProbe(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet4] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. 
The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class VolumeDevice(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + devicePath: str = Field( + ..., + description="devicePath is the path inside of the container that the device will be mapped to.", + ) + name: str = Field( + ..., + description="name must match the name of a persistentVolumeClaim in the pod", + ) + + +class VolumeMount(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + mountPath: str = Field( + ..., + description="Path within the container at which the volume should be mounted. Must\nnot contain ':'.", + ) + mountPropagation: Optional[str] = Field( + default=None, + description="mountPropagation determines how mounts are propagated from the host\nto container and the other way around.\nWhen not set, MountPropagationNone is used.\nThis field is beta in 1.10.", + ) + name: str = Field(..., description="This must match the Name of a Volume.") + readOnly: Optional[bool] = Field( + default=None, + description="Mounted read-only if true, read-write otherwise (false or unspecified).\nDefaults to false.", + ) + subPath: Optional[str] = Field( + default=None, + description="Path within the volume from which the container's volume should be mounted.\nDefaults to \"\" (volume's root).", + ) + subPathExpr: Optional[str] = Field( + default=None, + description="Expanded path within the volume from which the container's volume should be mounted.\nBehaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.\nDefaults to \"\" (volume's root).\nSubPathExpr and SubPath are mutually exclusive.", + ) + + +class ExtraContainer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description='Arguments to the entrypoint.\nThe container image\'s CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + command: Optional[List[str]] = Field( + default=None, + description='Entrypoint array. Not executed within a shell.\nThe container image\'s ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + env: Optional[List[EnvItem]] = Field( + default=None, + description="List of environment variables to set in the container.\nCannot be updated.", + ) + envFrom: Optional[List[EnvFromItem]] = Field( + default=None, + description="List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.", + ) + image: Optional[str] = Field( + default=None, + description="Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.", + ) + imagePullPolicy: Optional[str] = Field( + default=None, + description="Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + ) + lifecycle: Optional[Lifecycle] = Field( + default=None, + description="Actions that the management system should take in response to container lifecycle events.\nCannot be updated.", + ) + livenessProbe: Optional[LivenessProbe] = Field( + default=None, + description="Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + name: str = Field( + ..., + description="Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated.", + ) + ports: Optional[List[Port]] = Field( + default=None, + description='List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default "0.0.0.0" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated.', + ) + readinessProbe: Optional[ReadinessProbe] = Field( + default=None, + description="Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + resizePolicy: Optional[List[ResizePolicyItem]] = Field( + default=None, description="Resources resize policy for the container." 
+ ) + resources: Optional[Resources] = Field( + default=None, + description="Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + restartPolicy: Optional[str] = Field( + default=None, + description='RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is "Always".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod\'s restart policy and the container type.\nSetting the RestartPolicy as "Always" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy "Always"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a "sidecar" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted.', + ) + securityContext: Optional[SecurityContext] = Field( + default=None, + description="SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + ) + startupProbe: Optional[StartupProbe] = Field( + default=None, + description="StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + stdin: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.", + ) + stdinOnce: Optional[bool] = Field( + default=None, + description="Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. 
If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false", + ) + terminationMessagePath: Optional[str] = Field( + default=None, + description="Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.", + ) + terminationMessagePolicy: Optional[str] = Field( + default=None, + description="Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.", + ) + tty: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.", + ) + volumeDevices: Optional[List[VolumeDevice]] = Field( + default=None, + description="volumeDevices is the list of block devices to be used by the container.", + ) + volumeMounts: Optional[List[VolumeMount]] = Field( + default=None, + description="Pod volumes to mount into the container's filesystem.\nCannot be updated.", + ) + workingDir: Optional[str] = Field( + default=None, + description="Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.", + ) + + +class AwsElasticBlockStore(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + partition: Optional[int] = Field( + default=None, + description='partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as "1".\nSimilarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly value true will force the readOnly setting in VolumeMounts.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + volumeID: str = Field( + ..., + description="volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + + +class AzureDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + cachingMode: Optional[str] = Field( + default=None, + description="cachingMode is the Host Caching mode: None, Read Only, Read Write.", + ) + diskName: str = Field( + ..., description="diskName is the Name of the data disk in the blob storage" + ) + diskURI: str = Field( + ..., description="diskURI is the URI of data disk in the blob storage" + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is Filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + kind: Optional[str] = Field( + default=None, + description="kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + + +class AzureFile(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretName: str = Field( + ..., + description="secretName is the name of secret that contains Azure Storage Account Name and Key", + ) + shareName: str = Field(..., description="shareName is the azure share Name") + + +class SecretRef3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + + +class Cephfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + monitors: List[str] = Field( + ..., + description="monitors is Required: Monitors is a collection of Ceph monitors\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + path: Optional[str] = Field( + default=None, + description="path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + secretFile: Optional[str] = Field( + default=None, + description="secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + user: Optional[str] = Field( + default=None, + description="user is optional: User is the rados user name, default is admin\nMore info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + ) + + +class Cinder(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is optional: points to a secret object containing parameters used to connect\nto OpenStack.", + ) + volumeID: str = Field( + ..., + description="volumeID used to identify the volume in cinder.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + + +class Item(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class ConfigMap(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode is optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and 
content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional specify whether the ConfigMap or its keys must be defined", + ) + + +class NodePublishSecretRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + + +class Csi(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + driver: str = Field( + ..., + description="driver is the name of the CSI driver that handles this volume.\nConsult with your admin for the correct name as registered in the cluster.", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType to mount. Ex. "ext4", "xfs", "ntfs".\nIf not provided, the empty value is passed to the associated CSI driver\nwhich will determine the default filesystem to apply.', + ) + nodePublishSecretRef: Optional[NodePublishSecretRef] = Field( + default=None, + description="nodePublishSecretRef is a reference to the secret object containing\nsensitive information to pass to the CSI driver to complete the CSI\nNodePublishVolume and NodeUnpublishVolume calls.\nThis field is optional, and may be empty if no secret is required. If the\nsecret object contains more than one secret, all secret references are passed.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly specifies a read-only configuration for the volume.\nDefaults to false (read/write).", + ) + volumeAttributes: Optional[Dict[str, str]] = Field( + default=None, + description="volumeAttributes stores driver-specific properties that are passed to the CSI\ndriver. Consult your driver's documentation for supported values.", + ) + + +class Item1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + ) + mode: Optional[int] = Field( + default=None, + description="Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. 
The first item of the relative path must not start with '..'", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + ) + + +class DownwardAPI(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="Optional: mode bits to use on created files by default. Must be a\nOptional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDefaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item1]] = Field( + default=None, description="Items is a list of downward API volume file" + ) + + +class EmptyDir(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + medium: Optional[str] = Field( + default=None, + description='medium represents what type of storage medium should back this directory.\nThe default is "" which means to use the node\'s default medium.\nMust be an empty string (default) or Memory.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir', + ) + sizeLimit: Optional[Union[int, str]] = Field( + default=None, + description="sizeLimit is the total amount of local storage required for this EmptyDir volume.\nThe size limit is also applicable for memory medium.\nThe maximum usage on memory medium EmptyDir would be the minimum value between\nthe SizeLimit specified here and the sum of memory limits of all containers in a pod.\nThe default is nil which means that the limit is undefined.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + ) + + +class DataSource1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiGroup: Optional[str] = Field( + default=None, + description="APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.", + ) + kind: str = Field(..., description="Kind is the type of resource being referenced") + name: str = Field(..., description="Name is the name of resource being referenced") + + +class DataSourceRef(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiGroup: Optional[str] = Field( + default=None, + description="APIGroup is the group for the resource being referenced.\nIf APIGroup is not specified, the specified Kind must be in the core API group.\nFor any other third-party types, APIGroup is required.", + ) + kind: str = Field(..., description="Kind is the type of resource being referenced") + name: str = Field(..., description="Name is the name of resource being referenced") + namespace: Optional[str] = Field( + default=None, + description="Namespace is the namespace of resource being referenced\nNote that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. 
See the ReferenceGrant documentation for details.\n(Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + ) + + +class Resources1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class MatchExpression(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field( + ..., description="key is the label key that the selector applies to." + ) + operator: str = Field( + ..., + description="operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist.", + ) + values: Optional[List[str]] = Field( + default=None, + description="values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.", + ) + + +class Selector(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + matchExpressions: Optional[List[MatchExpression]] = Field( + default=None, + description="matchExpressions is a list of label selector requirements. The requirements are ANDed.", + ) + matchLabels: Optional[Dict[str, str]] = Field( + default=None, + description='matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is "key", the\noperator is "In", and the values array contains only "value". 
The requirements are ANDed.', + ) + + +class Spec1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + accessModes: Optional[List[str]] = Field( + default=None, + description="accessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + ) + dataSource: Optional[DataSource1] = Field( + default=None, + description="dataSource field can be used to specify either:\n* An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)\n* An existing PVC (PersistentVolumeClaim)\nIf the provisioner or an external controller can support the specified data source,\nit will create a new volume based on the contents of the specified data source.\nWhen the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,\nand dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.\nIf the namespace is specified, then dataSourceRef will not be copied to dataSource.", + ) + dataSourceRef: Optional[DataSourceRef] = Field( + default=None, + description="dataSourceRef specifies the object from which to populate the volume with data, if a non-empty\nvolume is desired. This may be any object from a non-empty API group (non\ncore object) or a PersistentVolumeClaim object.\nWhen this field is specified, volume binding will only succeed if the type of\nthe specified object matches some installed volume populator or dynamic\nprovisioner.\nThis field will replace the functionality of the dataSource field and as such\nif both fields are non-empty, they must have the same value. For backwards\ncompatibility, when namespace isn't specified in dataSourceRef,\nboth fields (dataSource and dataSourceRef) will be set to the same\nvalue automatically if one of them is empty and the other is non-empty.\nWhen namespace is specified in dataSourceRef,\ndataSource isn't set to the same value and must be empty.\nThere are three important differences between dataSource and dataSourceRef:\n* While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n(Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + ) + resources: Optional[Resources1] = Field( + default=None, + description="resources represents the minimum resources the volume should have.\nIf RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements\nthat are lower than previous value but must still be higher than capacity recorded in the\nstatus field of the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + ) + selector: Optional[Selector] = Field( + default=None, + description="selector is a label query over volumes to consider for binding.", + ) + storageClassName: Optional[str] = Field( + default=None, + description="storageClassName is the name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + ) + volumeMode: 
Optional[str] = Field( + default=None, + description="volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the binding reference to the PersistentVolume backing this claim.", + ) + + +class VolumeClaimTemplate(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + metadata: Optional[Dict[str, Any]] = Field( + default=None, + description="May contain labels and annotations that will be copied into the PVC\nwhen creating it. No other fields are allowed and will be rejected during\nvalidation.", + ) + spec: Spec1 = Field( + ..., + description="The specification for the PersistentVolumeClaim. The entire content is\ncopied unchanged into the PVC that gets created from this\ntemplate. The same fields as in a PersistentVolumeClaim\nare also valid here.", + ) + + +class Ephemeral(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + volumeClaimTemplate: Optional[VolumeClaimTemplate] = Field( + default=None, + description="Will be used to create a stand-alone PVC to provision the volume.\nThe pod in which this EphemeralVolumeSource is embedded will be the\nowner of the PVC, i.e. the PVC will be deleted together with the\npod. The name of the PVC will be `<pod name>-<volume name>` where\n`<volume name>` is the name from the `PodSpec.Volumes` array\nentry. Pod validation will reject the pod if the concatenated name\nis not valid for a PVC (for example, too long).\n\n\nAn existing PVC with that name that is not owned by the pod\nwill *not* be used for the pod to avoid using an unrelated\nvolume by mistake. Starting the pod is then blocked until\nthe unrelated PVC is removed. If such a pre-created PVC is\nmeant to be used by the pod, the PVC has to updated with an\nowner reference to the pod once the pod exists. Normally\nthis should not be necessary, but it may be useful when\nmanually reconstructing a broken cluster.\n\n\nThis field is read-only and no changes will be made by Kubernetes\nto the PVC after it has been created.\n\n\nRequired, must not be nil.", + ) + + +class Fc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + lun: Optional[int] = Field( + default=None, description="lun is Optional: FC target lun number" + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + targetWWNs: Optional[List[str]] = Field( + default=None, + description="targetWWNs is Optional: FC target worldwide names (WWNs)", + ) + wwids: Optional[List[str]] = Field( + default=None, + description="wwids Optional: FC volume world wide identifiers (wwids)\nEither wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", + ) + + +class FlexVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + driver: str = Field( + ..., description="driver is the name of the driver to use for this volume."
+ ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.', + ) + options: Optional[Dict[str, str]] = Field( + default=None, + description="options is Optional: this field holds extra command options if any.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly is Optional: defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is Optional: secretRef is reference to the secret object containing\nsensitive information to pass to the plugin scripts. This may be\nempty if no secret object is specified. If the secret object\ncontains more than one secret, all secrets are passed to the plugin\nscripts.", + ) + + +class Flocker(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + datasetName: Optional[str] = Field( + default=None, + description="datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker\nshould be considered as deprecated", + ) + datasetUUID: Optional[str] = Field( + default=None, + description="datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset", + ) + + +class GcePersistentDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + partition: Optional[int] = Field( + default=None, + description='partition is the partition in the volume that you want to mount.\nIf omitted, the default is to mount by volume name.\nExamples: For volume /dev/sda1, you specify the partition as "1".\nSimilarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk', + ) + pdName: str = Field( + ..., + description="pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + + +class GitRepo(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + directory: Optional[str] = Field( + default=None, + description="directory is the target directory name.\nMust not contain or start with '..'. If '.' is supplied, the volume directory will be the\ngit repository. 
Otherwise, if specified, the volume will contain the git repository in\nthe subdirectory with the given name.", + ) + repository: str = Field(..., description="repository is the URL") + revision: Optional[str] = Field( + default=None, + description="revision is the commit hash for the specified revision.", + ) + + +class Glusterfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + endpoints: str = Field( + ..., + description="endpoints is the endpoint name that details Glusterfs topology.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + path: str = Field( + ..., + description="path is the Glusterfs volume path.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the Glusterfs volume to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + ) + + +class HostPath(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + path: str = Field( + ..., + description="path of the directory on the host.\nIf the path is a symlink, it will follow the link to the real path.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + ) + type: Optional[str] = Field( + default=None, + description='type for HostPath Volume\nDefaults to ""\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath', + ) + + +class Iscsi(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + chapAuthDiscovery: Optional[bool] = Field( + default=None, + description="chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + ) + chapAuthSession: Optional[bool] = Field( + default=None, + description="chapAuthSession defines whether support iSCSI Session CHAP authentication", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + initiatorName: Optional[str] = Field( + default=None, + description="initiatorName is the custom iSCSI Initiator Name.\nIf initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface\n<target portal>:<volume name> will be created for the connection.", + ) + iqn: str = Field(..., description="iqn is the target iSCSI Qualified Name.") + iscsiInterface: Optional[str] = Field( + default=None, + description="iscsiInterface is the interface Name that uses an iSCSI transport.\nDefaults to 'default' (tcp).", + ) + lun: int = Field(..., description="lun represents iSCSI Target Lun number.") + portals: Optional[List[str]] = Field( + default=None, + description="portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is the CHAP Secret for iSCSI target and initiator authentication", + ) + targetPortal: str = Field( + ..., + description="targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port\nis other than default (typically TCP ports 860 and 3260).", + ) + + +class Nfs(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + path: str = Field( + ..., + description="path that is exported by the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the NFS export to be mounted with read-only permissions.\nDefaults to false.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + server: str = Field( + ..., + description="server is the hostname or IP address of the NFS server.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + + +class PersistentVolumeClaim(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claimName: str = Field( + ..., + description="claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Will force the ReadOnly setting in VolumeMounts.\nDefault false.", + ) + + +class PhotonPersistentDisk(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + pdID: str = Field( + ..., + description="pdID is the ID that identifies Photon Controller persistent disk", + ) + + +class PortworxVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fSType represents the filesystem type to mount\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + volumeID: str = Field( + ..., description="volumeID uniquely identifies a Portworx volume" + ) + + +class Item2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class ConfigMap1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item2]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nConfigMap will be projected into the volume as a file whose name is the\nkey and content is the value. 
If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the ConfigMap,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional specify whether the ConfigMap or its keys must be defined", + ) + + +class Item3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + ) + mode: Optional[int] = Field( + default=None, + description="Optional: mode bits used to set permissions on this file, must be an octal value\nbetween 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + ) + + +class DownwardAPI1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item3]] = Field( + default=None, description="Items is a list of DownwardAPIVolume file" + ) + + +class Item4(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + key: str = Field(..., description="key is the key to project.") + mode: Optional[int] = Field( + default=None, + description="mode is Optional: mode bits used to set permissions on this file.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nIf not specified, the volume defaultMode will be used.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + path: str = Field( + ..., + description="path is the relative path of the file to map the key to.\nMay not be an absolute path.\nMay not contain the path element '..'.\nMay not start with the string '..'.", + ) + + +class Secret(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + items: Optional[List[Item4]] = Field( + default=None, + description="items if unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. 
Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, + description="optional field specify whether the Secret or its key must be defined", + ) + + +class ServiceAccountToken(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + audience: Optional[str] = Field( + default=None, + description="audience is the intended audience of the token. A recipient of a token\nmust identify itself with an identifier specified in the audience of the\ntoken, and otherwise should reject the token. The audience defaults to the\nidentifier of the apiserver.", + ) + expirationSeconds: Optional[int] = Field( + default=None, + description="expirationSeconds is the requested duration of validity of the service\naccount token. As the token approaches expiration, the kubelet volume\nplugin will proactively rotate the service account token. The kubelet will\nstart trying to rotate the token if the token is older than 80 percent of\nits time to live or if the token is older than 24 hours.Defaults to 1 hour\nand must be at least 10 minutes.", + ) + path: str = Field( + ..., + description="path is the path relative to the mount point of the file to project the\ntoken into.", + ) + + +class Source(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMap: Optional[ConfigMap1] = Field( + default=None, + description="configMap information about the configMap data to project", + ) + downwardAPI: Optional[DownwardAPI1] = Field( + default=None, + description="downwardAPI information about the downwardAPI data to project", + ) + secret: Optional[Secret] = Field( + default=None, description="secret information about the secret data to project" + ) + serviceAccountToken: Optional[ServiceAccountToken] = Field( + default=None, + description="serviceAccountToken is information about the serviceAccountToken data to project", + ) + + +class Projected(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode are the mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values for mode bits.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + sources: Optional[List[Source]] = Field( + default=None, description="sources is the list of volume projections" + ) + + +class Quobyte(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + group: Optional[str] = Field( + default=None, description="group to map volume access to\nDefault is no group" + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the Quobyte volume to be mounted with read-only permissions.\nDefaults to false.", + ) + registry: str = Field( + ..., + description="registry represents a single or multiple Quobyte Registry services\nspecified as a string as host:port pair (multiple entries are separated with commas)\nwhich acts as the central registry for volumes", + ) + tenant: Optional[str] = Field( + 
default=None, + description="tenant owning the given Quobyte volume in the Backend\nUsed with dynamically provisioned Quobyte volumes, value is set by the plugin", + ) + user: Optional[str] = Field( + default=None, + description="user to map volume access to\nDefaults to serivceaccount user", + ) + volume: str = Field( + ..., + description="volume is a string that references an already created Quobyte volume by name.", + ) + + +class Rbd(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type of the volume that you want to mount.\nTip: Ensure that the filesystem type is supported by the host operating system.\nExamples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#rbd\nTODO: how do we prevent errors in the filesystem from compromising the machine', + ) + image: str = Field( + ..., + description="image is the rados image name.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + keyring: Optional[str] = Field( + default=None, + description="keyring is the path to key ring for RBDUser.\nDefault is /etc/ceph/keyring.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + monitors: List[str] = Field( + ..., + description="monitors is a collection of Ceph monitors.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + pool: Optional[str] = Field( + default=None, + description="pool is the rados pool name.\nDefault is rbd.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly here will force the ReadOnly setting in VolumeMounts.\nDefaults to false.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef is name of the authentication secret for RBDUser. If provided\noverrides keyring.\nDefault is nil.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + user: Optional[str] = Field( + default=None, + description="user is the rados user name.\nDefault is admin.\nMore info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + ) + + +class ScaleIO(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs".\nDefault is "xfs".', + ) + gateway: str = Field( + ..., description="gateway is the host address of the ScaleIO API Gateway." + ) + protectionDomain: Optional[str] = Field( + default=None, + description="protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly Defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: SecretRef3 = Field( + ..., + description="secretRef references to the secret for ScaleIO user and other\nsensitive information. 
If this is not provided, Login operation will fail.", + ) + sslEnabled: Optional[bool] = Field( + default=None, + description="sslEnabled Flag enable/disable SSL communication with Gateway, default false", + ) + storageMode: Optional[str] = Field( + default=None, + description="storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.\nDefault is ThinProvisioned.", + ) + storagePool: Optional[str] = Field( + default=None, + description="storagePool is the ScaleIO Storage Pool associated with the protection domain.", + ) + system: str = Field( + ..., + description="system is the name of the storage system as configured in ScaleIO.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the name of a volume already created in the ScaleIO system\nthat is associated with this volume source.", + ) + + +class Secret1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultMode: Optional[int] = Field( + default=None, + description="defaultMode is Optional: mode bits used to set permissions on created files by default.\nMust be an octal value between 0000 and 0777 or a decimal value between 0 and 511.\nYAML accepts both octal and decimal values, JSON requires decimal values\nfor mode bits. Defaults to 0644.\nDirectories within the path are not affected by this setting.\nThis might be in conflict with other options that affect the file\nmode, like fsGroup, and the result can be other mode bits set.", + ) + items: Optional[List[Item4]] = Field( + default=None, + description="items If unspecified, each key-value pair in the Data field of the referenced\nSecret will be projected into the volume as a file whose name is the\nkey and content is the value. If specified, the listed keys will be\nprojected into the specified paths, and unlisted keys will not be\npresent. If a key is specified which is not present in the Secret,\nthe volume setup will error unless it is marked optional. Paths must be\nrelative and may not contain the '..' path or start with '..'.", + ) + optional: Optional[bool] = Field( + default=None, + description="optional field specify whether the Secret or its keys must be defined", + ) + secretName: Optional[str] = Field( + default=None, + description="secretName is the name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + ) + + +class Storageos(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is the filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + readOnly: Optional[bool] = Field( + default=None, + description="readOnly defaults to false (read/write). ReadOnly here will force\nthe ReadOnly setting in VolumeMounts.", + ) + secretRef: Optional[SecretRef3] = Field( + default=None, + description="secretRef specifies the secret to use for obtaining the StorageOS API\ncredentials. If not specified, default values will be attempted.", + ) + volumeName: Optional[str] = Field( + default=None, + description="volumeName is the human-readable name of the StorageOS volume. Volume\nnames are only unique within a namespace.", + ) + volumeNamespace: Optional[str] = Field( + default=None, + description='volumeNamespace specifies the scope of the volume within StorageOS. 
If no\nnamespace is specified then the Pod\'s namespace will be used. This allows the\nKubernetes name scoping to be mirrored within StorageOS for tighter integration.\nSet VolumeName to any name to override the default behaviour.\nSet to "default" if you are not using namespaces within StorageOS.\nNamespaces that do not pre-exist within StorageOS will be created.', + ) + + +class VsphereVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + fsType: Optional[str] = Field( + default=None, + description='fsType is filesystem type to mount.\nMust be a filesystem type supported by the host operating system.\nEx. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.', + ) + storagePolicyID: Optional[str] = Field( + default=None, + description="storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + ) + storagePolicyName: Optional[str] = Field( + default=None, + description="storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + ) + volumePath: str = Field( + ..., description="volumePath is the path that identifies vSphere volume vmdk" + ) + + +class ExtraVolume(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + awsElasticBlockStore: Optional[AwsElasticBlockStore] = Field( + default=None, + description="awsElasticBlockStore represents an AWS Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + ) + azureDisk: Optional[AzureDisk] = Field( + default=None, + description="azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + ) + azureFile: Optional[AzureFile] = Field( + default=None, + description="azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + ) + cephfs: Optional[Cephfs] = Field( + default=None, + description="cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + ) + cinder: Optional[Cinder] = Field( + default=None, + description="cinder represents a cinder volume attached and mounted on kubelets host machine.\nMore info: https://examples.k8s.io/mysql-cinder-pd/README.md", + ) + configMap: Optional[ConfigMap] = Field( + default=None, + description="configMap represents a configMap that should populate this volume", + ) + csi: Optional[Csi] = Field( + default=None, + description="csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + ) + downwardAPI: Optional[DownwardAPI] = Field( + default=None, + description="downwardAPI represents downward API about the pod that should populate this volume", + ) + emptyDir: Optional[EmptyDir] = Field( + default=None, + description="emptyDir represents a temporary directory that shares a pod's lifetime.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + ) + ephemeral: Optional[Ephemeral] = Field( + default=None, + description="ephemeral represents a volume that is handled by a cluster storage driver.\nThe volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,\nand deleted when the pod is removed.\n\n\nUse this if:\na) the volume is only needed while the pod runs,\nb) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and\nd) the storage driver supports dynamic volume 
provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\n\nUse PersistentVolumeClaim or one of the vendor-specific\nAPIs for volumes that persist for longer than the lifecycle\nof an individual pod.\n\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to\nbe used that way - see the documentation of the driver for\nmore information.\n\n\nA pod can use both types of ephemeral volumes and\npersistent volumes at the same time.", + ) + fc: Optional[Fc] = Field( + default=None, + description="fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + ) + flexVolume: Optional[FlexVolume] = Field( + default=None, + description="flexVolume represents a generic volume resource that is\nprovisioned/attached using an exec based plugin.", + ) + flocker: Optional[Flocker] = Field( + default=None, + description="flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + ) + gcePersistentDisk: Optional[GcePersistentDisk] = Field( + default=None, + description="gcePersistentDisk represents a GCE Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + ) + gitRepo: Optional[GitRepo] = Field( + default=None, + description="gitRepo represents a git repository at a particular revision.\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an\nEmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir\ninto the Pod's container.", + ) + glusterfs: Optional[Glusterfs] = Field( + default=None, + description="glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/glusterfs/README.md", + ) + hostPath: Optional[HostPath] = Field( + default=None, + description="hostPath represents a pre-existing file or directory on the host\nmachine that is directly exposed to the container. This is generally\nused for system agents or other privileged things that are allowed\nto see the host machine. 
Most containers will NOT need this.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath\n---\nTODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not\nmount host directories as read/write.", + ) + iscsi: Optional[Iscsi] = Field( + default=None, + description="iscsi represents an ISCSI Disk resource that is attached to a\nkubelet's host machine and then exposed to the pod.\nMore info: https://examples.k8s.io/volumes/iscsi/README.md", + ) + name: str = Field( + ..., + description="name of the volume.\nMust be a DNS_LABEL and unique within the pod.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + ) + nfs: Optional[Nfs] = Field( + default=None, + description="nfs represents an NFS mount on the host that shares a pod's lifetime\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + ) + persistentVolumeClaim: Optional[PersistentVolumeClaim] = Field( + default=None, + description="persistentVolumeClaimVolumeSource represents a reference to a\nPersistentVolumeClaim in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + ) + photonPersistentDisk: Optional[PhotonPersistentDisk] = Field( + default=None, + description="photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + ) + portworxVolume: Optional[PortworxVolume] = Field( + default=None, + description="portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + ) + projected: Optional[Projected] = Field( + default=None, + description="projected items for all in one resources secrets, configmaps, and downward API", + ) + quobyte: Optional[Quobyte] = Field( + default=None, + description="quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + ) + rbd: Optional[Rbd] = Field( + default=None, + description="rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.\nMore info: https://examples.k8s.io/volumes/rbd/README.md", + ) + scaleIO: Optional[ScaleIO] = Field( + default=None, + description="scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + ) + secret: Optional[Secret1] = Field( + default=None, + description="secret represents a secret that should populate this volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + ) + storageos: Optional[Storageos] = Field( + default=None, + description="storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + ) + vsphereVolume: Optional[VsphereVolume] = Field( + default=None, + description="vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + ) + + +class TlsSecret(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + adopt: Optional[bool] = Field( + default=None, + description="If the secret is adopted then the operator will delete the secret when the custom resource that uses it is deleted.", + ) + name: str + + +class Ingress(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + annotations: Optional[Dict[str, str]] = None + host: str + ingressClassName: Optional[str] = None + tlsSecret: Optional[TlsSecret] = Field( + default=None, + description="The name of the TLS secret, same as what is specified in a regular Kubernetes Ingress.", + ) + + +class ValueFrom1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + 
configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom1] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class SecretRef10(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: Optional[str] = Field( + default=None, + description="Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\nTODO: Add other useful fields. apiVersion, kind, uid?", + ) + optional: Optional[bool] = Field( + default=None, description="Specify whether the Secret must be defined" + ) + + +class EnvFromItem1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapRef: Optional[ConfigMapRef] = Field( + default=None, description="The ConfigMap to select from" + ) + prefix: Optional[str] = Field( + default=None, + description="An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + ) + secretRef: Optional[SecretRef10] = Field( + default=None, description="The Secret to select from" + ) + + +class HttpGet5(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." 
+ ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PostStart1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet5] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class HttpGet6(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class PreStop1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + httpGet: Optional[HttpGet6] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, + description="Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept\nfor the backward compatibility. There are no validation of this field and\nlifecycle hooks will fail in runtime when tcp handler is specified.", + ) + + +class Lifecycle1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + postStart: Optional[PostStart1] = Field( + default=None, + description="PostStart is called immediately after a container is created. If the handler fails,\nthe container is terminated and restarted according to its restart policy.\nOther management of the container blocks until the hook completes.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + preStop: Optional[PreStop1] = Field( + default=None, + description="PreStop is called immediately before a container is terminated due to an\nAPI request or management event such as liveness/startup probe failure,\npreemption, resource contention, etc. The handler is not called if the\ncontainer crashes or exits. The Pod's termination grace period countdown begins before the\nPreStop hook is executed. Regardless of the outcome of the handler, the\ncontainer will eventually terminate within the Pod's termination grace\nperiod (unless delayed by finalizers). 
Other management of the container blocks until the hook completes\nor until the termination grace period is reached.\nMore info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + ) + + +class HttpGet7(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class LivenessProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet7] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. 
Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class HttpGet8(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class ReadinessProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet8] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. 
Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class Resources2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class SecurityContext1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + allowPrivilegeEscalation: Optional[bool] = Field( + default=None, + description="AllowPrivilegeEscalation controls whether a process can gain more\nprivileges than its parent process. This bool directly controls if\nthe no_new_privs flag will be set on the container process.\nAllowPrivilegeEscalation is true always when the container is:\n1) run as Privileged\n2) has CAP_SYS_ADMIN\nNote that this field cannot be set when spec.os.name is windows.", + ) + capabilities: Optional[Capabilities] = Field( + default=None, + description="The capabilities to add/drop when running containers.\nDefaults to the default set of capabilities granted by the container runtime.\nNote that this field cannot be set when spec.os.name is windows.", + ) + privileged: Optional[bool] = Field( + default=None, + description="Run container in privileged mode.\nProcesses in privileged containers are essentially equivalent to root on the host.\nDefaults to false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + procMount: Optional[str] = Field( + default=None, + description="procMount denotes the type of proc mount to use for the containers.\nThe default is DefaultProcMount which uses the container runtime defaults for\nreadonly paths and masked paths.\nThis requires the ProcMountType feature flag to be enabled.\nNote that this field cannot be set when spec.os.name is windows.", + ) + readOnlyRootFilesystem: Optional[bool] = Field( + default=None, + description="Whether this container has a read-only root filesystem.\nDefault is false.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsGroup: Optional[int] = Field( + default=None, + description="The GID to run the entrypoint of the container process.\nUses runtime default if unset.\nMay also be set in PodSecurityContext. 
If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + runAsNonRoot: Optional[bool] = Field( + default=None, + description="Indicates that the container must run as a non-root user.\nIf true, the Kubelet will validate the image at runtime to ensure that it\ndoes not run as UID 0 (root) and fail to start the container if it does.\nIf unset or false, no such validation will be performed.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.", + ) + runAsUser: Optional[int] = Field( + default=None, + description="The UID to run the entrypoint of the container process.\nDefaults to user specified in image metadata if unspecified.\nMay also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seLinuxOptions: Optional[SeLinuxOptions] = Field( + default=None, + description="The SELinux context to be applied to the container.\nIf unspecified, the container runtime will allocate a random SELinux context for each\ncontainer. May also be set in PodSecurityContext. If set in both SecurityContext and\nPodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is windows.", + ) + seccompProfile: Optional[SeccompProfile] = Field( + default=None, + description="The seccomp options to use by this container. If seccomp options are\nprovided at both the pod & container level, the container options\noverride the pod options.\nNote that this field cannot be set when spec.os.name is windows.", + ) + windowsOptions: Optional[WindowsOptions] = Field( + default=None, + description="The Windows specific settings applied to all containers.\nIf unspecified, the options from the PodSecurityContext will be used.\nIf set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.\nNote that this field cannot be set when spec.os.name is linux.", + ) + + +class HttpGet9(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description='Host name to connect to, defaults to the pod IP. You probably want to set\n"Host" in httpHeaders instead.', + ) + httpHeaders: Optional[List[HttpHeader]] = Field( + default=None, + description="Custom headers to set in the request. HTTP allows repeated headers.", + ) + path: Optional[str] = Field( + default=None, description="Path to access on the HTTP server." + ) + port: Union[int, str] = Field( + ..., + description="Name or number of the port to access on the container.\nNumber must be in the range 1 to 65535.\nName must be an IANA_SVC_NAME.", + ) + scheme: Optional[str] = Field( + default=None, + description="Scheme to use for connecting to the host.\nDefaults to HTTP.", + ) + + +class StartupProbe1(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + exec: Optional[Exec] = Field( + default=None, description="Exec specifies the action to take." + ) + failureThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. 
Minimum value is 1.", + ) + grpc: Optional[Grpc] = Field( + default=None, description="GRPC specifies an action involving a GRPC port." + ) + httpGet: Optional[HttpGet9] = Field( + default=None, description="HTTPGet specifies the http request to perform." + ) + initialDelaySeconds: Optional[int] = Field( + default=None, + description="Number of seconds after the container has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + periodSeconds: Optional[int] = Field( + default=None, + description="How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.", + ) + successThreshold: Optional[int] = Field( + default=None, + description="Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + ) + tcpSocket: Optional[TcpSocket] = Field( + default=None, description="TCPSocket specifies an action involving a TCP port." + ) + terminationGracePeriodSeconds: Optional[int] = Field( + default=None, + description="Optional duration in seconds the pod needs to terminate gracefully upon probe failure.\nThe grace period is the duration in seconds after the processes running in the pod are sent\na termination signal and the time when the processes are forcibly halted with a kill signal.\nSet this value longer than the expected cleanup time for your process.\nIf this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this\nvalue overrides the value provided by the pod spec.\nValue must be non-negative integer. The value zero indicates stop immediately via\nthe kill signal (no opportunity to shut down).\nThis is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.\nMinimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + ) + timeoutSeconds: Optional[int] = Field( + default=None, + description="Number of seconds after which the probe times out.\nDefaults to 1 second. Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + + +class InitContainer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description='Arguments to the entrypoint.\nThe container image\'s CMD is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + command: Optional[List[str]] = Field( + default=None, + description='Entrypoint array. Not executed within a shell.\nThe container image\'s ENTRYPOINT is used if this is not provided.\nVariable references $(VAR_NAME) are expanded using the container\'s environment. If a variable\ncannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will\nproduce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless\nof whether the variable exists or not. Cannot be updated.\nMore info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell', + ) + env: Optional[List[EnvItem1]] = Field( + default=None, + description="List of environment variables to set in the container.\nCannot be updated.", + ) + envFrom: Optional[List[EnvFromItem1]] = Field( + default=None, + description="List of sources to populate environment variables in the container.\nThe keys defined within a source must be a C_IDENTIFIER. All invalid keys\nwill be reported as an event when the container is starting. When a key exists in multiple\nsources, the value associated with the last source will take precedence.\nValues defined by an Env with a duplicate key will take precedence.\nCannot be updated.", + ) + image: Optional[str] = Field( + default=None, + description="Container image name.\nMore info: https://kubernetes.io/docs/concepts/containers/images\nThis field is optional to allow higher level config management to default or override\ncontainer images in workload controllers like Deployments and StatefulSets.", + ) + imagePullPolicy: Optional[str] = Field( + default=None, + description="Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + ) + lifecycle: Optional[Lifecycle1] = Field( + default=None, + description="Actions that the management system should take in response to container lifecycle events.\nCannot be updated.", + ) + livenessProbe: Optional[LivenessProbe1] = Field( + default=None, + description="Periodic probe of container liveness.\nContainer will be restarted if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + name: str = Field( + ..., + description="Name of the container specified as a DNS_LABEL.\nEach container in a pod must have a unique name (DNS_LABEL).\nCannot be updated.", + ) + ports: Optional[List[Port]] = Field( + default=None, + description='List of ports to expose from the container. Not specifying a port here\nDOES NOT prevent that port from being exposed. Any port which is\nlistening on the default "0.0.0.0" address inside a container will be\naccessible from the network.\nModifying this array with strategic merge patch may corrupt the data.\nFor more information See https://github.com/kubernetes/kubernetes/issues/108255.\nCannot be updated.', + ) + readinessProbe: Optional[ReadinessProbe1] = Field( + default=None, + description="Periodic probe of container service readiness.\nContainer will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + resizePolicy: Optional[List[ResizePolicyItem]] = Field( + default=None, description="Resources resize policy for the container." 
+ ) + resources: Optional[Resources2] = Field( + default=None, + description="Compute Resources required by this container.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + restartPolicy: Optional[str] = Field( + default=None, + description='RestartPolicy defines the restart behavior of individual containers in a pod.\nThis field may only be set for init containers, and the only allowed value is "Always".\nFor non-init containers or when this field is not specified,\nthe restart behavior is defined by the Pod\'s restart policy and the container type.\nSetting the RestartPolicy as "Always" for the init container will have the following effect:\nthis init container will be continually restarted on\nexit until all regular containers have terminated. Once all regular\ncontainers have completed, all init containers with restartPolicy "Always"\nwill be shut down. This lifecycle differs from normal init containers and\nis often referred to as a "sidecar" container. Although this init\ncontainer still starts in the init container sequence, it does not wait\nfor the container to complete before proceeding to the next init\ncontainer. Instead, the next init container starts immediately after this\ninit container is started, or after any startupProbe has successfully\ncompleted.', + ) + securityContext: Optional[SecurityContext1] = Field( + default=None, + description="SecurityContext defines the security options the container should be run with.\nIf set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + ) + startupProbe: Optional[StartupProbe1] = Field( + default=None, + description="StartupProbe indicates that the Pod has successfully initialized.\nIf specified, no other probes are executed until this completes successfully.\nIf this probe fails, the Pod will be restarted, just as if the livenessProbe failed.\nThis can be used to provide different probe parameters at the beginning of a Pod's lifecycle,\nwhen it might take a long time to load data or warm a cache, than during steady-state operation.\nThis cannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + ) + stdin: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a buffer for stdin in the container runtime. If this\nis not set, reads from stdin in the container will always result in EOF.\nDefault is false.", + ) + stdinOnce: Optional[bool] = Field( + default=None, + description="Whether the container runtime should close the stdin channel after it has been opened by\na single attach. When stdin is true the stdin stream will remain open across multiple attach\nsessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the\nfirst client attaches to stdin, and then remains open and accepts data until the client disconnects,\nat which time stdin is closed and remains closed until the container is restarted. 
If this\nflag is false, a container processes that reads from stdin will never receive an EOF.\nDefault is false", + ) + terminationMessagePath: Optional[str] = Field( + default=None, + description="Optional: Path at which the file to which the container's termination message\nwill be written is mounted into the container's filesystem.\nMessage written is intended to be brief final status, such as an assertion failure message.\nWill be truncated by the node if greater than 4096 bytes. The total message length across\nall containers will be limited to 12kb.\nDefaults to /dev/termination-log.\nCannot be updated.", + ) + terminationMessagePolicy: Optional[str] = Field( + default=None, + description="Indicate how the termination message should be populated. File will use the contents of\nterminationMessagePath to populate the container status message on both success and failure.\nFallbackToLogsOnError will use the last chunk of container log output if the termination\nmessage file is empty and the container exited with an error.\nThe log output is limited to 2048 bytes or 80 lines, whichever is smaller.\nDefaults to File.\nCannot be updated.", + ) + tty: Optional[bool] = Field( + default=None, + description="Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.\nDefault is false.", + ) + volumeDevices: Optional[List[VolumeDevice]] = Field( + default=None, + description="volumeDevices is the list of block devices to be used by the container.", + ) + volumeMounts: Optional[List[VolumeMount]] = Field( + default=None, + description="Pod volumes to mount into the container's filesystem.\nCannot be updated.", + ) + workingDir: Optional[str] = Field( + default=None, + description="Container's working directory.\nIf not specified, the container runtime's default will be used, which\nmight be configured in the container image.\nCannot be updated.", + ) + + +class ValueFrom2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + configMapKeyRef: Optional[ConfigMapKeyRef] = Field( + default=None, description="Selects a key of a ConfigMap." + ) + fieldRef: Optional[FieldRef] = Field( + default=None, + description="Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,\nspec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + ) + resourceFieldRef: Optional[ResourceFieldRef] = Field( + default=None, + description="Selects a resource of the container: only resources limits and requests\n(limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + ) + secretKeyRef: Optional[SecretKeyRef] = Field( + default=None, description="Selects a key of a secret in the pod's namespace" + ) + + +class EnvItem2(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + name: str = Field( + ..., description="Name of the environment variable. Must be a C_IDENTIFIER." + ) + value: Optional[str] = Field( + default=None, + description='Variable references $(VAR_NAME) are expanded\nusing the previously defined environment variables in the container and\nany service environment variables. If a variable cannot be resolved,\nthe reference in the input string will be unchanged. 
Double $$ are reduced\nto a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.\n"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".\nEscaped references will never be expanded, regardless of whether the variable\nexists or not.\nDefaults to "".', + ) + valueFrom: Optional[ValueFrom2] = Field( + default=None, + description="Source for the environment variable's value. Cannot be used if value is not empty.", + ) + + +class Resources3(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + claims: Optional[List[Claim]] = Field( + default=None, + description="Claims lists the names of resources, defined in spec.resourceClaims,\nthat are used by this container.\n\n\nThis is an alpha field and requires enabling the\nDynamicResourceAllocation feature gate.\n\n\nThis field is immutable. It can only be set for containers.", + ) + limits: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Limits describes the maximum amount of compute resources allowed.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + requests: Optional[Dict[str, Union[int, str]]] = Field( + default=None, + description="Requests describes the minimum amount of compute resources required.\nIf Requests is omitted for a container, it defaults to Limits if that is explicitly specified,\notherwise to an implementation-defined value. Requests cannot exceed Limits.\nMore info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + ) + + +class Storage(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + className: Optional[str] = None + mountPath: str = Field( + default="/workspace", + description="The absolute mount path for the session volume", + ) + size: Union[int, str] = "1Gi" + + +class Session(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + args: Optional[List[str]] = Field( + default=None, + description="The arguments to run in the session container, if omitted it will use the Docker image CMD", + ) + command: Optional[List[str]] = Field( + default=None, + description="The command to run in the session container, if omitted it will use the Docker image ENTRYPOINT", + ) + env: Optional[List[EnvItem2]] = None + extraVolumeMounts: Optional[List[ExtraVolumeMount]] = Field( + default=None, description="Additional volume mounts for the session container" + ) + image: str + port: int = Field( + ..., + description="The TCP port on the pod where the session can be accessed.\nIf the session has authentication enabled then the ingress and service will point to the authentication container\nand the authentication proxy container will proxy to this port. If authentication is disabled then the ingress and service\nroute directly to this port. Note that renku reserves the highest TCP value 65535 to run the authentication proxy.", + gt=0, + lt=65535, + ) + resources: Optional[Resources3] = Field( + default=None, + description="Resource requirements and limits in the same format as a Pod in Kubernetes", + ) + runAsGroup: int = Field( + default=1000, + description="The group is set on the session and this value is also set as the fsgroup for the whole pod and all session\ncontainers.", + ge=0, + ) + runAsUser: int = Field(default=1000, ge=0) + shmSize: Optional[Union[int, str]] = Field( + default=None, description="Size of /dev/shm" + ) + storage: Storage = {} + urlPath: str = Field( + default="/", + description="The path where the session can be accessed. 
If an ingress is enabled then this will be\nthe path prefix for the ingress.", + ) + workingDir: Optional[str] = Field( + default=None, + description="The absolute path for the working directory of the session container, if omitted it will use the image\nworking directory.", + ) + + +class Spec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + authentication: Optional[Authentication] = Field( + default=None, description="Authentication configuration for the session" + ) + codeRepositories: Optional[List[CodeRepository]] = Field( + default=None, + description="A list of code repositories and associated configuration that will be cloned in the session", + ) + culling: Optional[Culling] = Field( + default=None, description="Culling configuration" + ) + dataSources: Optional[List[DataSource]] = Field( + default=None, + description="A list of data sources that should be added to the session", + ) + extraContainers: Optional[List[ExtraContainer]] = Field( + default=None, + description="Additional containers to add to the session statefulset.\nNOTE: The container names provided will be partially overwritten and randomized to avoid collisions", + ) + extraVolumes: Optional[List[ExtraVolume]] = Field( + default=None, + description="Additional volumes to include in the statefulset for a session", + ) + hibernated: bool = Field( + ..., + description="Will hibernate the session, scaling the session's statefulset to zero.", + ) + ingress: Optional[Ingress] = Field( + default=None, + description="Configuration for an ingress to the session, if omitted a Kubernetes Ingress will not be created", + ) + initContainers: Optional[List[InitContainer]] = Field( + default=None, + description="Additional init containers to add to the session statefulset\nNOTE: The container names provided will be partially overwritten and randomized to avoid collisions", + ) + session: Session = Field( + ..., + description="Specification for the main session container that the user will access and use", + ) + + +class Condition(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + lastTransitionTime: Optional[datetime] = None + message: Optional[str] = None + reason: Optional[str] = None + status: str + type: str + + +class ContainerCounts(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + ready: Optional[int] = None + total: Optional[int] = None + + +class InitContainerCounts(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + ready: Optional[int] = None + total: Optional[int] = None + + +class State(Enum): + Running = "Running" + Failed = "Failed" + Hibernated = "Hibernated" + NotReady = "NotReady" + RunningDegraded = "RunningDegraded" + + +class Status(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + conditions: Optional[List[Condition]] = Field( + default=None, + description="Conditions store the status conditions of the AmaltheaSessions. 
This is a standard thing that\nmany operators implement see https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties", + ) + containerCounts: Optional[ContainerCounts] = Field( + default=None, + description="Counts of the total and ready containers, can represent either regular or init containers.", + ) + failingSince: Optional[datetime] = None + hibernatedSince: Optional[datetime] = None + idle: bool = False + idleSince: Optional[datetime] = None + initContainerCounts: Optional[InitContainerCounts] = Field( + default=None, + description="Counts of the total and ready containers, can represent either regular or init containers.", + ) + state: State = "NotReady" + url: Optional[str] = None + + +class Model(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + apiVersion: Optional[str] = Field( + default=None, + description="APIVersion defines the versioned schema of this representation of an object.\nServers should convert recognized schemas to the latest internal value, and\nmay reject unrecognized values.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + ) + kind: Optional[str] = Field( + default=None, + description="Kind is a string value representing the REST resource this object represents.\nServers may infer this from the endpoint the client submits requests to.\nCannot be updated.\nIn CamelCase.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + ) + metadata: Optional[Dict[str, Any]] = None + spec: Optional[Spec] = Field( + default=None, + description="AmaltheaSessionSpec defines the desired state of AmaltheaSession", + ) + status: Status = Field( + default={}, + description="AmaltheaSessionStatus defines the observed state of AmaltheaSession", + ) diff --git a/components/renku_data_services/notebooks/cr_base.py b/components/renku_data_services/notebooks/cr_base.py new file mode 100644 index 000000000..ec75db447 --- /dev/null +++ b/components/renku_data_services/notebooks/cr_base.py @@ -0,0 +1,12 @@ +"""Base models for K8s CRD specifications.""" + +from pydantic import BaseModel + + +class BaseCRD(BaseModel): + """Base CRD specification.""" + + class Config: + """Do not exclude unknown properties.""" + + extra = "allow" diff --git a/components/renku_data_services/notebooks/cr_jupyter_server.py b/components/renku_data_services/notebooks/cr_jupyter_server.py new file mode 100644 index 000000000..f4830e13e --- /dev/null +++ b/components/renku_data_services/notebooks/cr_jupyter_server.py @@ -0,0 +1,213 @@ +# generated by datamodel-codegen: +# filename: +# timestamp: 2024-09-04T22:45:30+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Any, Dict, List, Optional + +from pydantic import ConfigDict, Field +from renku_data_services.notebooks.cr_base import BaseCRD + + +class Oidc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + authorizedEmails: List[str] = Field( + default=[], + description='List of users (identified by Email address read from the "email" OIDC claim) which are allowed to access this Jupyter session. 
This list is stored as a file and passed to the `--authenticated-emails-file` option (see https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview#command-line-options).', + ) + authorizedGroups: List[str] = Field( + default=[], + description='List of groups of users (read from the "groups" OIDC claim) which are allowed to access this Jupyter session. This list passed to the `--allowed-group` option (see https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview#command-line-options).', + ) + clientId: Optional[str] = Field( + default=None, + description="The client id of the application registered with the OIDC provider, see `--client-id` here: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/#command-line-options", + ) + enabled: bool = False + issuerUrl: Optional[str] = Field( + default=None, + description="Issuer URL of the OIDC provider, see `--oidc-issuer-url` here: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/#command-line-options", + ) + + +class Auth(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + oidc: Oidc = Field( + default={}, + description="Configuration for an OpenID connect provider to be used for access control to the jupyter server. Useful information can be found in the oauth2 proxy docs: https://oauth2-proxy.github.io/oauth2-proxy/docs/configuration/overview/", + ) + token: Optional[str] = Field( + default=None, + description='A token that will be passed to the `--ServerApp.token` option when running the Jupyter server and needed when first accessing the Jupyter server. The options are:\n\n - By leaving this field empty, a token will be autogenerated and\nadded under the key `ServerApp.token` to the secret which is created as a child of the custom resource object.\n\n - Setting the token to an empty string "" runs the Jupyter server\ncontainer itself without any authentication. This is recommended when enabling OIDC as authentication and authorization are then handled by the dedicated plugins.\n\n - Set an actual value here. Note that this string will be stored\nin clear text as part of the custom resource object. This option is mostly useful for dev purposes.', + ) + + +class Culling(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + maxAgeSecondsThreshold: int = Field( + default=0, + description="The maximum allowed age for a session, regardless of whether it is active or not. A value of zero indicates that the server cannot be culled due to its age.", + ge=0, + ) + idleSecondsThreshold: int = Field( + default=0, + description="How long should a server be idle for before it is culled. A value of zero indicates that the server should never be culled for inactivity.", + ge=0, + ) + startingSecondsThreshold: int = Field( + default=0, + description="How long can a server be in starting state before it gets culled. A value of zero indicates that the server cannot be culled due to starting too long.", + ge=0, + ) + failedSecondsThreshold: int = Field( + default=0, + description="How long can a server be in failed state before it gets culled. A value of zero indicates that the server cannot be culled due to failing.", + ge=0, + ) + hibernatedSecondsThreshold: int = Field( + default=0, + description="Number of seconds where a server can be in hibernated state before it gets culled. 
A value of zero indicates that hibernated servers cannot be culled.", + ge=0, + ) + + +class JupyterServer(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + defaultUrl: str = Field( + default="/lab", + description="The default URL to redirect to from '/'. Frequently used values are '/lab' or '/rstudio'. Translates to `--ServerApp.default_url`.", + ) + hibernated: bool = Field( + default=False, description="Whether the server is hibernated or not." + ) + image: str = "jupyter/minimal-notebook:latest" + resources: Dict[str, Any] = Field( + default={}, + description="Regular K8s resource requests, will be set on the main notebook container.", + ) + rootDir: str = Field( + default="/home/jovyan/work", + description="The absolute path to the root/notebook directory for the jupyter server. Should lead to a subdirectory of or match the path at storage.pvc.mountPath. Translates to `--ServerApp.root_dir`.", + ) + + +class Type(Enum): + application_json_patch_json = "application/json-patch+json" + application_merge_patch_json = "application/merge-patch+json" + + +class Patch(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + patch: Optional[Any] = None + type: Optional[Type] = None + + +class Tls(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = False + secretName: Optional[str] = Field( + default=None, + description="The name of the K8s TLS secret. Might be pre-existing in the cluster or created under that name by a tool like cert manager when needed.", + ) + + +class Routing(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + host: Optional[str] = Field( + default=None, + description="Host under which the server will be available (eg myserver.example.com), should not include the schema.", + ) + ingressAnnotations: Dict[str, Any] = {} + path: str = Field( + default="/", description="Optionally make the server available under some path." + ) + tls: Tls = Field( + default={}, description="Settings for defining TLS termination by the ingress." + ) + + +class Pvc(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + enabled: bool = Field( + default=False, + description="Whether a PVC should be used to back the session. Defaults to 'false' in which case an emptyDir volume will be used.", + ) + mountPath: str = Field( + default="/home/jovyan/work", + description="The absolute path to the location where the PVC should be mounted in the user session pod.", + ) + storageClassName: Optional[str] = Field( + default=None, + description="Storage class to be used for the PVC. If left empty, the default storage class defined for the cluster will be used.", + ) + + +class Storage(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + pvc: Pvc = {} + size: Any = Field( + default="100Mi", + description="Size of the PVC or sizeLimit of the emptyDir volume which backs the session respectively.", + ) + + +class Spec(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + auth: Auth = Field( + default={}, + description="Settings defining access control to the jupyter server.", + ) + culling: Culling = Field( + default={}, description="Options about culling idle servers" + ) + jupyterServer: JupyterServer = Field( + default={}, + description="Configuration options (such as image to run) for the Jupyter server. See also https://jupyter-server.readthedocs.io/en/latest/other/full-config.html", + ) + patches: List[Patch] = Field( + default=[], + description="Patches to be applied to the created child resources after template rendering. 
Currently json patches and json merge patches are supported.", + ) + routing: Routing = Field( + default={}, + description="Settings related to how the jupyter server will be exposed outside of the cluster.", + ) + storage: Storage = Field( + default={}, description="Settings to define storage to back the jupyter server." + ) + + +class Model(BaseCRD): + model_config = ConfigDict( + extra="allow", + ) + spec: Optional[Spec] = Field( + default=None, + description="User defined specification for a JupyterServer custom resource.", + ) + status: Dict[str, Any] = Field( + default={"children": {}, "mainPod": {}}, + description="A field for Jupyter Server status information, do not modify.", + ) diff --git a/components/renku_data_services/notebooks/crs.py b/components/renku_data_services/notebooks/crs.py new file mode 100644 index 000000000..1a548ff75 --- /dev/null +++ b/components/renku_data_services/notebooks/crs.py @@ -0,0 +1,229 @@ +"""Custom resource definition with proper names from the autogenerated code.""" + +from datetime import datetime +from typing import Any, cast +from urllib.parse import urljoin, urlparse, urlunparse + +from kubernetes.utils import parse_quantity +from pydantic import BaseModel, Field, field_validator +from sanic.log import logger +from ulid import ULID + +from renku_data_services.errors import errors +from renku_data_services.notebooks import apispec +from renku_data_services.notebooks.cr_amalthea_session import ( + Authentication, + CodeRepository, + Culling, + DataSource, + ExtraContainer, + ExtraVolume, + ExtraVolumeMount, + Ingress, + InitContainer, + SecretRef, + Session, + State, + Storage, + TlsSecret, +) +from renku_data_services.notebooks.cr_amalthea_session import EnvItem2 as SessionEnvItem +from renku_data_services.notebooks.cr_amalthea_session import Item4 as SecretAsVolumeItem +from renku_data_services.notebooks.cr_amalthea_session import Model as _ASModel +from renku_data_services.notebooks.cr_amalthea_session import Resources3 as Resources +from renku_data_services.notebooks.cr_amalthea_session import Secret1 as SecretAsVolume +from renku_data_services.notebooks.cr_amalthea_session import SecretRef as SecretRefKey +from renku_data_services.notebooks.cr_amalthea_session import SecretRef1 as SecretRefWhole +from renku_data_services.notebooks.cr_amalthea_session import Spec as AmaltheaSessionSpec +from renku_data_services.notebooks.cr_amalthea_session import Type as AuthenticationType +from renku_data_services.notebooks.cr_amalthea_session import Type1 as CodeRepositoryType +from renku_data_services.notebooks.cr_jupyter_server import Model as _JSModel +from renku_data_services.notebooks.cr_jupyter_server import Patch +from renku_data_services.notebooks.cr_jupyter_server import Spec as JupyterServerSpec +from renku_data_services.notebooks.cr_jupyter_server import Type as PatchType + + +class Metadata(BaseModel): + """Basic k8s metadata spec.""" + + class Config: + """Do not exclude unknown properties.""" + + extra = "allow" + + name: str + namespace: str | None = None + labels: dict[str, str] = Field(default_factory=dict) + annotations: dict[str, str] = Field(default_factory=dict) + uid: str | None = None + creationTimestamp: datetime | None = None + deletionTimestamp: datetime | None = None + + +class ComputeResources(BaseModel): + """Resource requests from k8s values.""" + + cpu: float | None = None + memory: int | None = None + storage: int | None = None + gpu: int | None = None + + @field_validator("cpu", mode="before") + @classmethod + def 
_convert_k8s_cpu(cls, val: Any) -> Any: + if val is None: + return None + return float(parse_quantity(val)) + + @field_validator("gpu", mode="before") + @classmethod + def _convert_k8s_gpu(cls, val: Any) -> Any: + if val is None: + return None + return round(parse_quantity(val), ndigits=None) + + @field_validator("memory", "storage", mode="before") + @classmethod + def _convert_k8s_bytes(cls, val: Any) -> Any: + """Converts to gigabytes of base 10.""" + if val is None: + return None + return round(parse_quantity(val) / 1_000_000_000, ndigits=None) + + +class JupyterServerV1Alpha1(_JSModel): + """Jupyter server CRD.""" + + kind: str = "JupyterServer" + apiVersion: str = "amalthea.dev/v1alpha1" + metadata: Metadata + + def get_compute_resources(self) -> ComputeResources: + """Convert the k8s resource requests and storage into usable values.""" + if self.spec is None: + return ComputeResources() + resource_requests: dict = self.spec.jupyterServer.resources.get("requests", {}) + resource_requests["storage"] = self.spec.storage.size + return ComputeResources.model_validate(resource_requests) + + +class AmaltheaSessionV1Alpha1(_ASModel): + """Amalthea session CRD.""" + + kind: str = "AmaltheaSession" + apiVersion: str = "amalthea.dev/v1alpha1" + # Here we overwrite the default from ASModel because it is too weakly typed + metadata: Metadata # type: ignore[assignment] + + def get_compute_resources(self) -> ComputeResources: + """Convert the k8s resource requests and storage into usable values.""" + if self.spec is None: + return ComputeResources() + resource_requests: dict = {} + if self.spec.session.resources is not None: + resource_requests = self.spec.session.resources.requests or {} + resource_requests["storage"] = self.spec.session.storage.size + return ComputeResources.model_validate(resource_requests) + + @property + def project_id(self) -> ULID: + """Get the project ID from the annotations.""" + if "renku.io/project_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its project_id annotation" + ) + return cast(ULID, ULID.from_str(self.metadata.annotations["renku.io/project_id"])) + + @property + def launcher_id(self) -> ULID: + """Get the launcher ID from the annotations.""" + if "renku.io/launcher_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its launcher_id annotation" + ) + return cast(ULID, ULID.from_str(self.metadata.annotations["renku.io/launcher_id"])) + + @property + def resource_class_id(self) -> int: + """Get the resource class from the annotations.""" + if "renku.io/resource_class_id" not in self.metadata.annotations: + raise errors.ProgrammingError( + message=f"The session with name {self.metadata.name} is missing its resource_class_id annotation" + ) + return int(self.metadata.annotations["renku.io/resource_class_id"]) + + def as_apispec(self) -> apispec.SessionResponse: + """Convert the manifest into a form ready to be serialized and sent in a HTTP response.""" + if self.status is None: + raise errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + f"because it is missing a status" + ) + if self.spec is None: + raise errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + "because it is missing the spec field" + ) + if self.spec.session.resources is None: + raise 
errors.ProgrammingError( + message=f"The manifest for a session with name {self.metadata.name} cannot be serialized " + "because it is missing the spec.session.resources field" + ) + url = "None" + if self.base_url is not None: + url = self.base_url + ready_containers = 0 + total_containers = 0 + if self.status.initContainerCounts is not None: + ready_containers += self.status.initContainerCounts.ready or 0 + total_containers += self.status.initContainerCounts.total or 0 + if self.status.containerCounts is not None: + ready_containers += self.status.containerCounts.ready or 0 + total_containers += self.status.containerCounts.total or 0 + + if self.status.state in [State.Running, State.Hibernated, State.Failed]: + state = apispec.State3(self.status.state.value.lower()) + elif self.status.state == State.RunningDegraded: + state = apispec.State3.running + elif self.status.state == State.NotReady and self.metadata.deletionTimestamp is not None: + state = apispec.State3.stopping + else: + state = apispec.State3.starting + + return apispec.SessionResponse( + image=self.spec.session.image, + name=self.metadata.name, + resources=apispec.SessionResources( + requests=apispec.SessionResourcesRequests.model_validate( + self.get_compute_resources(), from_attributes=True + ) + if self.spec.session.resources.requests is not None + else None, + ), + started=self.metadata.creationTimestamp, + status=apispec.SessionStatus( + state=state, + ready_containers=ready_containers, + total_containers=total_containers, + ), + url=url, + project_id=str(self.project_id), + launcher_id=str(self.launcher_id), + resource_class_id=self.resource_class_id, + ) + + @property + def base_url(self) -> str | None: + """Get the URL of the session, excluding the default URL from the session launcher.""" + if self.status.url and len(self.status.url) > 0: + return self.status.url + if self.spec is None or self.spec.ingress is None: + return None + scheme = "https" if self.spec and self.spec.ingress and self.spec.ingress.tlsSecret else "http" + host = self.spec.ingress.host + path = self.spec.session.urlPath if self.spec.session.urlPath else "/" + params = None + query = None + fragment = None + url = urlunparse((scheme, host, path, params, query, fragment)) + return url diff --git a/components/renku_data_services/notebooks/models.py b/components/renku_data_services/notebooks/models.py new file mode 100644 index 000000000..fec11eb4b --- /dev/null +++ b/components/renku_data_services/notebooks/models.py @@ -0,0 +1,76 @@ +"""Basic models for amalthea sessions.""" + +from dataclasses import dataclass +from pathlib import Path + +from pydantic import AliasGenerator, BaseModel, Field, Json + +from renku_data_services.notebooks.crs import AmaltheaSessionV1Alpha1 + + +@dataclass +class SessionEnvVar: + """Environment variables for an amalthea session.""" + + name: str + value: str + + +@dataclass +class SessionUserSecrets: + """User secret mounted in an amalthea session.""" + + mount_path: Path + user_secret_ids: list[str] + + +class _AmaltheaSessionAnnotations(BaseModel): + class Config: + extra = "allow" + alias_generator = AliasGenerator( + alias=lambda field_name: f"renku.io/{field_name}", + ) + + session_launcher_id: str | None = None + project_id: str | None = None + user_secrets_mount_path: str | None = None + user_secrets_ids: Json[list[str]] = Field(default_factory=list) + env_variable_names: Json[list[str]] = Field(default_factory=list) + + +class _MetadataValidation(BaseModel): + class Config: + extra = "allow" + + name: str + 
annotations: _AmaltheaSessionAnnotations + labels: dict[str, str] = Field(default_factory=dict) + namespace: str | None = None + + +class AmaltheaSessionManifest: + """The manifest for an amalthea session.""" + + def __init__(self, manifest: AmaltheaSessionV1Alpha1) -> None: + self._manifest = manifest + self._metadata = _MetadataValidation.model_validate(self._manifest.metadata) + + def __repr__(self) -> str: + return f"{self.__class__}(name={self._metadata.name})" + + @property + def env_vars(self) -> dict[str, SessionEnvVar]: + """Extract the environment variables from a manifest.""" + output: dict[str, SessionEnvVar] = {} + assert self._manifest.spec + for env in self._manifest.spec.session.env or []: + if env.value is None: + continue + output[env.name] = SessionEnvVar(env.name, env.value) + return output + + @property + def requested_env_vars(self) -> dict[str, SessionEnvVar]: + """The environment variables requested.""" + requested_names = self._metadata.annotations.env_variable_names + return {ikey: ival for ikey, ival in self.env_vars.items() if ikey in requested_names} diff --git a/components/renku_data_services/notebooks/util/authn.py b/components/renku_data_services/notebooks/util/authn.py index 6cc578cc4..d01b169e0 100644 --- a/components/renku_data_services/notebooks/util/authn.py +++ b/components/renku_data_services/notebooks/util/authn.py @@ -1,52 +1,40 @@ """Authentication that is compatible with the tokens sent to the notebook service.""" -from collections.abc import Awaitable, Callable, Coroutine -from dataclasses import dataclass +from collections.abc import Callable, Coroutine from functools import wraps from typing import Any, Concatenate, ParamSpec, TypeVar from sanic import Request -from renku_data_services.errors import errors -from renku_data_services.notebooks.api.classes.user import AnonymousUser, RegisteredUser -from renku_data_services.notebooks.config import _NotebooksConfig +from renku_data_services.base_models import AnonymousAPIUser, APIUser, AuthenticatedAPIUser, Authenticator _T = TypeVar("_T") _P = ParamSpec("_P") -@dataclass -class NotebooksAuthenticator: - """Authentication for notebooks endpoints.""" - - config: _NotebooksConfig - - def authenticate(self, request: Request) -> RegisteredUser | AnonymousUser: - """Validate the tokens and ensure the user is signed in.""" - headers_dict: dict[str, str] = {str(k): str(v) for (k, v) in request.headers.items()} - user: RegisteredUser | AnonymousUser = RegisteredUser(headers_dict) - if not self.config.anonymous_sessions_enabled and not user.authenticated: - raise errors.UnauthorizedError(message="You have to be authenticated to perform this operation.") - if not user.authenticated: - user = AnonymousUser(headers_dict, self.config.git.url) - return user - - -def notebooks_authenticate( - authenticator: NotebooksAuthenticator, +def notebooks_internal_gitlab_authenticate( + authenticator: Authenticator, ) -> Callable[ - [Callable[Concatenate[Request, RegisteredUser | AnonymousUser, _P], Awaitable[_T]]], - Callable[Concatenate[Request, _P], Coroutine[Any, Any, _T]], + [Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, APIUser, _P], Coroutine[Any, Any, _T]]], + Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, _P], Coroutine[Any, Any, _T]], ]: """Decorator for a Sanic handler that that adds a notebooks user.""" def decorator( - f: Callable[Concatenate[Request, RegisteredUser | AnonymousUser, _P], Awaitable[_T]], - ) -> Callable[Concatenate[Request, _P], Coroutine[Any, 
Any, _T]]: + f: Callable[ + Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, APIUser, _P], Coroutine[Any, Any, _T] + ], + ) -> Callable[Concatenate[Request, AuthenticatedAPIUser | AnonymousAPIUser, _P], Coroutine[Any, Any, _T]]: @wraps(f) - async def decorated_function(request: Request, *args: _P.args, **kwargs: _P.kwargs) -> _T: - user = authenticator.authenticate(request) - return await f(request, user, *args, **kwargs) + async def decorated_function( + request: Request, + user: AuthenticatedAPIUser | AnonymousAPIUser, + *args: _P.args, + **kwargs: _P.kwargs, + ) -> _T: + access_token = str(request.headers.get("Gitlab-Access-Token")) + internal_gitlab_user = await authenticator.authenticate(access_token, request) + return await f(request, user, internal_gitlab_user, *args, **kwargs) return decorated_function diff --git a/components/renku_data_services/notebooks/util/kubernetes_.py b/components/renku_data_services/notebooks/util/kubernetes_.py index 135afcc3a..7cf289c95 100644 --- a/components/renku_data_services/notebooks/util/kubernetes_.py +++ b/components/renku_data_services/notebooks/util/kubernetes_.py @@ -18,14 +18,15 @@ from __future__ import annotations -from dataclasses import dataclass from enum import StrEnum from hashlib import md5 -from typing import Any, Self, TypeAlias +from typing import Any, TypeAlias, cast import escapism from kubernetes.client import V1Container +from renku_data_services.notebooks.crs import Patch, PatchType + def renku_1_make_server_name(safe_username: str, namespace: str, project: str, branch: str, commit_sha: str) -> str: """Form a unique server name for Renku 1.0 sessions. @@ -58,11 +59,10 @@ def renku_2_make_server_name(safe_username: str, project_id: str, launcher_id: s server_hash = md5(server_string_for_hashing.encode(), usedforsecurity=False).hexdigest().lower() prefix = _make_server_name_prefix(safe_username) # NOTE: A K8s object name can only contain lowercase alphanumeric characters, hyphens, or dots. - # Must be less than 253 characters long and start and end with an alphanumeric. + # Must be no more than 63 characters because the name is used to create a k8s Service and Services + # have more restrictions for their names because their names have to make a valid hostname. # NOTE: We use server name as a label value, so, server name must be less than 63 characters. - # NOTE: Amalthea adds 11 characters to the server name in a label, so we have only - # 52 characters available. - # !NOTE: For now we limit the server name to 42 characters. + # !NOTE: For now we limit the server name to a max of 42 characters. # NOTE: This is 12 + 9 + 21 = 42 characters return f"{prefix[:12]}-renku-2-{server_hash[:21]}" @@ -110,25 +110,17 @@ class PatchKind(StrEnum): merge: str = "application/merge-patch+json" -@dataclass -class Patch: - """Representation of a JSON patch.""" - - patch: JsonPatch | MergePatch - type: PatchKind - - @classmethod - def from_dict(cls, data: dict[str, Any]) -> Self: - """Create a patch from a dictionary.""" - return cls(data["patch"], PatchKind(data["type"])) - - def find_container(patches: list[Patch], container_name: str) -> dict[str, Any] | None: """Find the json patch corresponding a given container.""" + # rfc 7386 patches are dictionaries, i.e. merge patch or json merge patch + # rfc 6902 patches are lists, i.e. 
json patch for patch_obj in patches: - if patch_obj.type != PatchKind.json or not isinstance(patch_obj.patch, list): + if patch_obj.type != PatchType.application_json_patch_json or not isinstance(patch_obj.patch, list): continue for p in patch_obj.patch: + if not isinstance(p, dict): + continue + p = cast(dict[str, Any], p) if ( p.get("op") == "add" and p.get("path") == "/statefulset/spec/template/spec/containers/-" diff --git a/components/renku_data_services/notebooks/util/retries.py b/components/renku_data_services/notebooks/util/retries.py index e99f374a2..5e6854db8 100644 --- a/components/renku_data_services/notebooks/util/retries.py +++ b/components/renku_data_services/notebooks/util/retries.py @@ -1,9 +1,10 @@ """Methods for retrying requests.""" +import asyncio import functools -from collections.abc import Callable +from collections.abc import Awaitable, Callable from time import sleep -from typing import Any, Concatenate, ParamSpec, TypeVar +from typing import Concatenate, ParamSpec, TypeVar from renku_data_services.notebooks.errors.intermittent import RetryTimeoutError @@ -12,7 +13,7 @@ def retry_with_exponential_backoff( - should_retry: Callable[[Any], bool], + should_retry: Callable[[_RetType], bool], num_retries: int = 10, initial_wait_ms: int = 20, multiplier: float = 2.0, @@ -39,3 +40,38 @@ def wrapper_retry(*args: _Params.args, **kwargs: _Params.kwargs) -> _RetType: return wrapper_retry return decorator_retry + + +def retry_with_exponential_backoff_async( + should_retry: Callable[[_RetType], bool], + num_retries: int = 10, + initial_wait_ms: int = 20, + multiplier: float = 2.0, +) -> Callable[ + [Callable[Concatenate[_Params], Awaitable[_RetType]]], Callable[Concatenate[_Params], Awaitable[_RetType]] +]: + """Retries the wrapped function with an exponential backoff. + + The should_retry "callback" is passed the results from calling the wrapped function. + If the response is true, the function is called again, otherwise the loop ends and + the result of the wrapped function is returned. + + With the default values the wait times start with 20ms and then double every iteration. 
+ """ + + def decorator_retry( + func: Callable[Concatenate[_Params], Awaitable[_RetType]], + ) -> Callable[Concatenate[_Params], Awaitable[_RetType]]: + @functools.wraps(func) + async def wrapper_retry(*args: _Params.args, **kwargs: _Params.kwargs) -> _RetType: + for i in range(num_retries): + res = await func(*args, **kwargs) + if not should_retry(res): + return res + + await asyncio.sleep(initial_wait_ms * (multiplier**i) / 1000) + raise RetryTimeoutError(f"Retrying the function {func.__name__} timed out after {num_retries} retries.") + + return wrapper_retry + + return decorator_retry diff --git a/components/renku_data_services/project/api.spec.yaml b/components/renku_data_services/project/api.spec.yaml index 0c286f2d3..faf943256 100644 --- a/components/renku_data_services/project/api.spec.yaml +++ b/components/renku_data_services/project/api.spec.yaml @@ -259,6 +259,27 @@ paths: $ref: "#/components/responses/Error" tags: - projects + /projects/{project_id}/data_connector_links: + parameters: + - in: path + name: project_id + required: true + schema: + $ref: "#/components/schemas/Ulid" + description: the ID of the project + get: + summary: Get all links from data connectors to a given project + responses: + "200": + description: List of data connector to project links + content: + application/json: + schema: + $ref: "#/components/schemas/DataConnectorToProjectLinksList" + default: + $ref: "#/components/responses/Error" + tags: + - projects components: schemas: ProjectsList: @@ -495,6 +516,32 @@ components: type: string description: Entity Tag example: "9EE498F9D565D0C41E511377425F32F3" + DataConnectorToProjectLinksList: + description: A list of links from a data connector to a project + type: array + items: + $ref: "#/components/schemas/DataConnectorToProjectLink" + DataConnectorToProjectLink: + description: A link from a data connector to a project in Renku 2.0 + type: object + additionalProperties: false + properties: + id: + $ref: "#/components/schemas/Ulid" + data_connector_id: + $ref: "#/components/schemas/Ulid" + project_id: + $ref: "#/components/schemas/Ulid" + creation_date: + $ref: "#/components/schemas/CreationDate" + created_by: + $ref: "#/components/schemas/UserId" + required: + - id + - data_connector_id + - project_id + - creation_date + - created_by ProjectGetQuery: description: Query params for project get request allOf: diff --git a/components/renku_data_services/project/apispec.py b/components/renku_data_services/project/apispec.py index 30a3ad6e7..b050d0f04 100644 --- a/components/renku_data_services/project/apispec.py +++ b/components/renku_data_services/project/apispec.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: api.spec.yaml -# timestamp: 2024-08-30T09:58:32+00:00 +# timestamp: 2024-09-17T13:48:48+00:00 from __future__ import annotations @@ -33,6 +33,44 @@ class Role(Enum): owner = "owner" +class DataConnectorToProjectLink(BaseAPISpec): + model_config = ConfigDict( + extra="forbid", + ) + id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + data_connector_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + project_id: str = Field( + ..., + description="ULID identifier", + max_length=26, + min_length=26, + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", + ) + creation_date: datetime = Field( + ..., + description="The date and time the resource was created (in UTC and ISO-8601 
format)", + example="2023-11-01T17:32:28Z", + ) + created_by: str = Field( + ..., + description="Keycloak user ID", + example="f74a228b-1790-4276-af5f-25c2424e9b0c", + pattern="^[A-Za-z0-9]{1}[A-Za-z0-9-]+$", + ) + + class PaginationRequest(BaseAPISpec): model_config = ConfigDict( extra="forbid", @@ -103,6 +141,12 @@ class ProjectMemberResponse(BaseAPISpec): role: Role +class DataConnectorToProjectLinksList(RootModel[List[DataConnectorToProjectLink]]): + root: List[DataConnectorToProjectLink] = Field( + ..., description="A list of links from a data connector to a project" + ) + + class ProjectGetQuery(PaginationRequest): namespace: str = Field("", description="A namespace, used as a filter.") diff --git a/components/renku_data_services/project/db.py b/components/renku_data_services/project/db.py index e2a8ff2fb..8523db72c 100644 --- a/components/renku_data_services/project/db.py +++ b/components/renku_data_services/project/db.py @@ -21,6 +21,7 @@ from renku_data_services.message_queue.db import EventRepository from renku_data_services.message_queue.interface import IMessageQueue from renku_data_services.message_queue.redis_queue import dispatch_message +from renku_data_services.namespace import orm as ns_schemas from renku_data_services.namespace.db import GroupRepository from renku_data_services.project import apispec as project_apispec from renku_data_services.project import models @@ -95,7 +96,7 @@ async def get_project_by_namespace_slug( async with self.session_maker() as session: stmt = select(schemas.ProjectORM) stmt = _filter_by_namespace_slug(stmt, namespace) - stmt = stmt.where(schemas.ProjectSlug.slug == slug.lower()) + stmt = stmt.where(ns_schemas.EntitySlugORM.slug == slug.lower()) result = await session.execute(stmt) project_orm = result.scalars().first() @@ -131,7 +132,7 @@ async def insert_project( if not session: raise errors.ProgrammingError(message="A database session is required") ns = await session.scalar( - select(schemas.NamespaceORM).where(schemas.NamespaceORM.slug == project.namespace.lower()) + select(ns_schemas.NamespaceORM).where(ns_schemas.NamespaceORM.slug == project.namespace.lower()) ) if not ns: raise errors.MissingResourceError( @@ -167,10 +168,10 @@ async def insert_project( creation_date=datetime.now(UTC).replace(microsecond=0), keywords=project.keywords, ) - project_slug = schemas.ProjectSlug(slug, project_id=project_orm.id, namespace_id=ns.id) + project_slug = ns_schemas.EntitySlugORM.create_project_slug(slug, project_id=project_orm.id, namespace_id=ns.id) - session.add(project_slug) session.add(project_orm) + session.add(project_slug) await session.flush() await session.refresh(project_orm) @@ -237,7 +238,9 @@ async def update_project( if "namespace" in payload: ns_slug = payload["namespace"] - ns = await session.scalar(select(schemas.NamespaceORM).where(schemas.NamespaceORM.slug == ns_slug.lower())) + ns = await session.scalar( + select(ns_schemas.NamespaceORM).where(ns_schemas.NamespaceORM.slug == ns_slug.lower()) + ) if not ns: raise errors.MissingResourceError(message=f"The namespace with slug {ns_slug} does not exist") if not ns.group_id and not ns.user_id: @@ -297,9 +300,9 @@ async def delete_project( def _filter_by_namespace_slug(statement: Select[tuple[_T]], namespace: str) -> Select[tuple[_T]]: """Filters a select query on projects to a given namespace.""" return ( - statement.where(schemas.NamespaceORM.slug == namespace.lower()) - .where(schemas.ProjectSlug.namespace_id == schemas.NamespaceORM.id) - .where(schemas.ProjectORM.id == 
schemas.ProjectSlug.project_id) + statement.where(ns_schemas.NamespaceORM.slug == namespace.lower()) + .where(ns_schemas.EntitySlugORM.namespace_id == ns_schemas.NamespaceORM.id) + .where(schemas.ProjectORM.id == ns_schemas.EntitySlugORM.project_id) ) diff --git a/components/renku_data_services/project/orm.py b/components/renku_data_services/project/orm.py index b50232fe3..22b765d43 100644 --- a/components/renku_data_services/project/orm.py +++ b/components/renku_data_services/project/orm.py @@ -1,27 +1,29 @@ """SQLAlchemy's schemas for the projects database.""" from datetime import datetime -from typing import Optional +from typing import TYPE_CHECKING, Optional -from sqlalchemy import DateTime, Index, Integer, MetaData, String, func +from sqlalchemy import DateTime, Integer, MetaData, String, func from sqlalchemy.dialects.postgresql import ARRAY from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column, relationship from sqlalchemy.schema import ForeignKey from ulid import ULID from renku_data_services.authz import models as authz_models -from renku_data_services.namespace.orm import NamespaceORM +from renku_data_services.base_orm.registry import COMMON_ORM_REGISTRY from renku_data_services.project import models from renku_data_services.project.apispec import Visibility from renku_data_services.utils.sqlalchemy import ULIDType -metadata_obj = MetaData(schema="projects") # Has to match alembic ini section name +if TYPE_CHECKING: + from renku_data_services.namespace.orm import EntitySlugORM class BaseORM(MappedAsDataclass, DeclarativeBase): """Base class for all ORM classes.""" - metadata = metadata_obj + metadata = MetaData(schema="projects") + registry = COMMON_ORM_REGISTRY class ProjectORM(BaseORM): @@ -36,7 +38,9 @@ class ProjectORM(BaseORM): keywords: Mapped[Optional[list[str]]] = mapped_column("keywords", ARRAY(String(99)), nullable=True) # NOTE: The project slugs table has a foreign key from the projects table, but there is a stored procedure # triggered by the deletion of slugs to remove the project used by the slug. See migration 89aa4573cfa9. 
- slug: Mapped["ProjectSlug"] = relationship(lazy="joined", init=False, repr=False, viewonly=True) + slug: Mapped["EntitySlugORM"] = relationship( + lazy="joined", init=False, repr=False, viewonly=True, back_populates="project" + ) repositories: Mapped[list["ProjectRepositoryORM"]] = relationship( back_populates="project", default_factory=list, @@ -81,39 +85,3 @@ class ProjectRepositoryORM(BaseORM): ForeignKey("projects.id", ondelete="CASCADE"), default=None, index=True ) project: Mapped[Optional[ProjectORM]] = relationship(back_populates="repositories", default=None, repr=False) - - -class ProjectSlug(BaseORM): - """Project and namespace slugs.""" - - __tablename__ = "project_slugs" - __table_args__ = (Index("project_slugs_unique_slugs", "namespace_id", "slug", unique=True),) - - id: Mapped[int] = mapped_column(primary_key=True, init=False) - slug: Mapped[str] = mapped_column(String(99), index=True, nullable=False) - project_id: Mapped[ULID] = mapped_column( - ForeignKey(ProjectORM.id, ondelete="CASCADE", name="project_slugs_project_id_fk"), index=True - ) - namespace_id: Mapped[ULID] = mapped_column( - ForeignKey(NamespaceORM.id, ondelete="CASCADE", name="project_slugs_namespace_id_fk"), index=True - ) - namespace: Mapped[NamespaceORM] = relationship(lazy="joined", init=False, repr=False, viewonly=True) - - -class ProjectSlugOld(BaseORM): - """Project slugs history.""" - - __tablename__ = "project_slugs_old" - - id: Mapped[int] = mapped_column(primary_key=True, init=False) - slug: Mapped[str] = mapped_column(String(99), index=True, nullable=False) - created_at: Mapped[datetime] = mapped_column( - DateTime(timezone=True), nullable=False, index=True, init=False, server_default=func.now() - ) - latest_slug_id: Mapped[int] = mapped_column( - ForeignKey(ProjectSlug.id, ondelete="CASCADE"), - nullable=False, - init=False, - index=True, - ) - latest_slug: Mapped[ProjectSlug] = relationship(lazy="joined", repr=False, viewonly=True) diff --git a/components/renku_data_services/repositories/blueprints.py b/components/renku_data_services/repositories/blueprints.py index 551164ab0..48029b1b1 100644 --- a/components/renku_data_services/repositories/blueprints.py +++ b/components/renku_data_services/repositories/blueprints.py @@ -8,7 +8,7 @@ import renku_data_services.base_models as base_models from renku_data_services import errors -from renku_data_services.base_api.auth import authenticate +from renku_data_services.base_api.auth import authenticate_2 from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint from renku_data_services.base_api.etag import extract_if_none_match from renku_data_services.repositories import apispec @@ -28,23 +28,23 @@ class RepositoriesBP(CustomBlueprint): def get_one_repository(self) -> BlueprintFactoryResponse: """Get the metadata available about a repository.""" - @authenticate(self.internal_gitlab_authenticator) - async def _get_internal_gitlab_user(_: Request, user: base_models.APIUser) -> base_models.APIUser: - return user - - @authenticate(self.authenticator) + @authenticate_2(self.authenticator, self.internal_gitlab_authenticator) @extract_if_none_match async def _get_one_repository( - request: Request, user: base_models.APIUser, repository_url: str, etag: str | None + request: Request, + user: base_models.APIUser, + internal_gitlab_user: base_models.APIUser, + repository_url: str, + etag: str | None, ) -> JSONResponse | HTTPResponse: repository_url = unquote(repository_url) 
RepositoryParams.model_validate(dict(repository_url=repository_url)) - async def get_internal_gitlab_user() -> base_models.APIUser: - return await _get_internal_gitlab_user(request) - result = await self.git_repositories_repo.get_repository( - repository_url=repository_url, user=user, etag=etag, get_internal_gitlab_user=get_internal_gitlab_user + repository_url=repository_url, + user=user, + etag=etag, + internal_gitlab_user=internal_gitlab_user, ) if result == "304": return HTTPResponse(status=304) diff --git a/components/renku_data_services/repositories/db.py b/components/renku_data_services/repositories/db.py index c44a1a5ce..afebc4b5e 100644 --- a/components/renku_data_services/repositories/db.py +++ b/components/renku_data_services/repositories/db.py @@ -1,7 +1,7 @@ """Adapters for repositories database classes.""" -from collections.abc import Callable, Coroutine -from typing import Any, Literal +from collections.abc import Callable +from typing import Literal from urllib.parse import urlparse from httpx import AsyncClient as HttpClient @@ -38,7 +38,7 @@ async def get_repository( repository_url: str, user: base_models.APIUser, etag: str | None, - get_internal_gitlab_user: Callable[..., Coroutine[Any, Any, base_models.APIUser]], + internal_gitlab_user: base_models.APIUser, ) -> models.RepositoryProviderMatch | Literal["304"]: """Get the metadata about a repository.""" repository_netloc = urlparse(repository_url).netloc @@ -52,10 +52,9 @@ async def get_repository( if self.internal_gitlab_url: internal_gitlab_netloc = urlparse(self.internal_gitlab_url).netloc if matched_client is None and internal_gitlab_netloc == repository_netloc: - gitlab_user = await get_internal_gitlab_user() return await self._get_repository_from_internal_gitlab( repository_url=repository_url, - user=gitlab_user, + user=internal_gitlab_user, etag=etag, internal_gitlab_url=self.internal_gitlab_url, ) diff --git a/components/renku_data_services/session/api.spec.yaml b/components/renku_data_services/session/api.spec.yaml index 529499035..8a0f1fe90 100644 --- a/components/renku_data_services/session/api.spec.yaml +++ b/components/renku_data_services/session/api.spec.yaml @@ -11,10 +11,10 @@ servers: paths: /environments: get: - summary: Get all environments + summary: Get all global environments responses: "200": - description: List of environments + description: List of global environments content: application/json: schema: @@ -24,7 +24,7 @@ paths: tags: - environments post: - summary: Create a new session environment + summary: Create a new global session environment description: Requires admin permissions requestBody: required: true @@ -45,7 +45,7 @@ paths: - environments /environments/{environment_id}: get: - summary: Get a session environment + summary: Get a global session environment parameters: - in: path name: environment_id @@ -70,7 +70,7 @@ paths: tags: - environments patch: - summary: Update specific fields of an existing session environment + summary: Update specific fields of an existing global session environment description: Requires admin permissions parameters: - in: path @@ -102,7 +102,7 @@ paths: tags: - environments delete: - summary: Remove a session environment + summary: Remove a global session environment parameters: - in: path name: environment_id @@ -176,7 +176,7 @@ paths: tags: - session_launchers patch: - summary: Update specific fields of an existing session + summary: Update specific fields of an existing session launcher parameters: - in: path name: launcher_id @@ -221,32 +221,6 @@ 
paths: $ref: "#/components/responses/Error" tags: - session_launchers - /session_launchers/{launcher_id}/start: - post: - summary: Use a session launcher to start a session - parameters: - - in: path - name: launcher_id - required: true - schema: - $ref: "#/components/schemas/Ulid" - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/SessionStart" - responses: - "201": - description: The started session - content: - application/json: - schema: - $ref: "#/components/schemas/Session" - default: - $ref: "#/components/responses/Error" - tags: - - sessions /projects/{project_id}/session_launchers: get: summary: Get a project's session launchers @@ -290,11 +264,31 @@ components: $ref: "#/components/schemas/ContainerImage" default_url: $ref: "#/components/schemas/DefaultUrl" + uid: + $ref: "#/components/schemas/EnvironmentUid" + gid: + $ref: "#/components/schemas/EnvironmentGid" + working_directory: + $ref: "#/components/schemas/EnvironmentWorkingDirectory" + mount_directory: + $ref: "#/components/schemas/EnvironmentMountDirectory" + port: + $ref: "#/components/schemas/EnvironmentPort" + command: + $ref: "#/components/schemas/EnvironmentCommand" + args: + $ref: "#/components/schemas/EnvironmentArgs" required: - id - name - creation_date - container_image + - port + - working_directory + - mount_directory + - uid + - gid + - default_url example: id: 01AN4Z79ZS6XX96588FDX0H099 name: JupyterLab environment @@ -302,6 +296,33 @@ components: description: JupyterLab session environment container_image: renku-jupyter:latest default_url: "/lab" + port: 8080 + working_directory: /home/jovyan/work + mount_directory: /home/jovyan/work + uid: 1000 + gid: 1000 + EnvironmentGetInLauncher: + allOf: + - $ref: "#/components/schemas/Environment" + - type: object + properties: + environment_kind: + $ref: "#/components/schemas/EnvironmentKind" + required: + - environment_kind + example: + environment_kind: global_environment + EnvironmentPostInLauncher: + allOf: + - $ref: "#/components/schemas/EnvironmentPost" + - type: object + properties: + environment_kind: + $ref: "#/components/schemas/EnvironmentKind" + required: + - environment_kind + example: + environment_kind: global_environment EnvironmentPost: description: Data required to create a session environment type: object @@ -313,10 +334,49 @@ components: container_image: $ref: "#/components/schemas/ContainerImage" default_url: - $ref: "#/components/schemas/DefaultUrl" + allOf: + - $ref: "#/components/schemas/DefaultUrl" + - default: /lab + default: /lab + uid: + allOf: + - $ref: "#/components/schemas/EnvironmentUid" + - default: 1000 + default: 1000 + gid: + allOf: + - $ref: "#/components/schemas/EnvironmentGid" + - default: 1000 + default: 1000 + working_directory: + allOf: + - $ref: "#/components/schemas/EnvironmentWorkingDirectory" + - default: /home/jovyan/work + default: /home/jovyan/work + mount_directory: + allOf: + - $ref: "#/components/schemas/EnvironmentMountDirectory" + - default: /home/jovyan/work + default: /home/jovyan/work + port: + allOf: + - $ref: "#/components/schemas/EnvironmentPort" + - default: 8080 + default: 8080 + command: + $ref: "#/components/schemas/EnvironmentCommand" + args: + $ref: "#/components/schemas/EnvironmentArgs" required: - name - container_image + EnvironmentPatchInLauncher: + allOf: + - $ref: "#/components/schemas/EnvironmentPatch" + - type: object + properties: + environment_kind: + $ref: "#/components/schemas/EnvironmentKind" EnvironmentPatch: type: object description: 
Update a session environment @@ -330,6 +390,20 @@ components: $ref: "#/components/schemas/ContainerImage" default_url: $ref: "#/components/schemas/DefaultUrl" + uid: + $ref: "#/components/schemas/EnvironmentUid" + gid: + $ref: "#/components/schemas/EnvironmentGid" + working_directory: + $ref: "#/components/schemas/EnvironmentWorkingDirectory" + mount_directory: + $ref: "#/components/schemas/EnvironmentMountDirectory" + port: + $ref: "#/components/schemas/EnvironmentPort" + command: + $ref: "#/components/schemas/EnvironmentCommand" + args: + $ref: "#/components/schemas/EnvironmentArgs" SessionLaunchersList: description: A list of Renku session launchers type: array @@ -350,30 +424,36 @@ components: $ref: "#/components/schemas/CreationDate" description: $ref: "#/components/schemas/Description" - environment_kind: - $ref: "#/components/schemas/EnvironmentKind" - environment_id: - $ref: "#/components/schemas/EnvironmentId" + environment: + $ref: "#/components/schemas/EnvironmentGetInLauncher" resource_class_id: $ref: "#/components/schemas/ResourceClassId" - container_image: - $ref: "#/components/schemas/ContainerImage" - default_url: - $ref: "#/components/schemas/DefaultUrl" required: - id - project_id - name - creation_date - - environment_kind + - environment + - resource_class_id example: id: 01AN4Z79ZS5XN0F25N3DB94T4R project_id: 01AN4Z79ZS5XN0F25N3DB94T4R name: Renku R Session creation_date: "2023-11-01T17:32:28Z" description: R compute session - environment_kind: global_environment - environment_id: 01AN4Z79ZS6XX96588FDX0H099 + environment: + id: 01AN4Z79ZS6XX96588FDX0H099 + name: Rstudio + creation_date: "2023-11-01T17:32:28Z" + description: JupyterLab session environment + environment_kind: GLOBAL + container_image: rocker/rstudio + default_url: "/rstudio" + port: 8080 + working_directory: /home/rstudio/work + mount_directory: /home/rstudio/work + uid: 1000 + gid: 1000 SessionLauncherPost: description: Data required to create a session launcher type: object @@ -385,20 +465,21 @@ components: $ref: "#/components/schemas/Ulid" description: $ref: "#/components/schemas/Description" - environment_kind: - $ref: "#/components/schemas/EnvironmentKind" - environment_id: - $ref: "#/components/schemas/EnvironmentId" resource_class_id: $ref: "#/components/schemas/ResourceClassId" - container_image: - $ref: "#/components/schemas/ContainerImage" - default_url: - $ref: "#/components/schemas/DefaultUrl" + environment: + oneOf: + - $ref: "#/components/schemas/EnvironmentPostInLauncher" + - $ref: "#/components/schemas/EnvironmentIdOnlyPost" required: - name - project_id - - environment_kind + - environment + example: + project_id: 01AN4Z79ZS5XN0F25N3DB94T4R + name: Renku R Session + environment: + id: 01AN4Z79ZS6XX96588FDX0H099 SessionLauncherPatch: type: object description: Update a session launcher @@ -408,23 +489,12 @@ components: $ref: "#/components/schemas/SessionName" description: $ref: "#/components/schemas/Description" - environment_kind: - $ref: "#/components/schemas/EnvironmentKind" - environment_id: - $ref: "#/components/schemas/EnvironmentId" - resource_class_id: - $ref: "#/components/schemas/ResourceClassId" - container_image: - $ref: "#/components/schemas/ContainerImage" - default_url: - $ref: "#/components/schemas/DefaultUrl" - SessionStart: - type: object - description: Start a session - additionalProperties: true - properties: resource_class_id: $ref: "#/components/schemas/ResourceClassId" + environment: + oneOf: + - $ref: "#/components/schemas/EnvironmentPatchInLauncher" + - $ref: 
"#/components/schemas/EnvironmentIdOnlyPatch" Ulid: description: ULID identifier type: string @@ -437,13 +507,25 @@ components: minLength: 1 maxLength: 99 example: My Renku Session :) + EnvironmentIdOnlyPatch: + type: object + properties: + id: + $ref: "#/components/schemas/EnvironmentId" + EnvironmentIdOnlyPost: + type: object + properties: + id: + $ref: "#/components/schemas/EnvironmentId" + required: + - id EnvironmentKind: description: Kind of environment to use type: string enum: - - global_environment - - container_image - example: container_image + - GLOBAL + - CUSTOM + example: CUSTOM EnvironmentId: description: Id of the environment to use type: string @@ -473,15 +555,46 @@ components: type: integer default: null nullable: true - Session: - description: A Renku session - type: object - additionalProperties: true - properties: - name: - $ref: "#/components/schemas/SessionName" - url: - type: string + EnvironmentPort: + type: integer + minimum: 0 + exclusiveMinimum: true + exclusiveMaximum: true + # NOTE: we reserve 65400 - 65535 for usage of Renku sidecars and services + maximum: 65400 + description: The TCP port (on any container in the session) where user requests will be routed to from the ingress + EnvironmentUid: + type: integer + minimum: 0 + exclusiveMinimum: true + maximum: 65535 + description: The user ID used to run the session + EnvironmentGid: + type: integer + minimum: 0 + exclusiveMinimum: true + maximum: 65535 + description: The group ID used to run the session + EnvironmentWorkingDirectory: + type: string + description: The location where the session will start + minLength: 1 + EnvironmentMountDirectory: + type: string + description: The location where the persistent storage for the session will be mounted, usually it should be identical to or a parent of the working directory + minLength: 1 + EnvironmentCommand: + type: array + items: + type: string + description: The command that will be run i.e. will overwrite the image Dockerfile ENTRYPOINT, equivalent to command in Kubernetes + minLength: 1 + EnvironmentArgs: + type: array + items: + type: string + description: The arguments that will follow the command, i.e. 
will overwrite the image Dockerfile CMD, equivalent to args in Kubernetes + minLength: 1 ErrorResponse: type: object properties: diff --git a/components/renku_data_services/session/apispec.py b/components/renku_data_services/session/apispec.py index 6be545731..3ac7b194c 100644 --- a/components/renku_data_services/session/apispec.py +++ b/components/renku_data_services/session/apispec.py @@ -1,34 +1,20 @@ # generated by datamodel-codegen: # filename: api.spec.yaml -# timestamp: 2024-06-10T13:14:40+00:00 +# timestamp: 2024-08-25T21:01:41+00:00 from __future__ import annotations from datetime import datetime from enum import Enum -from typing import List, Optional +from typing import List, Optional, Union from pydantic import ConfigDict, Field, RootModel from renku_data_services.session.apispec_base import BaseAPISpec class EnvironmentKind(Enum): - global_environment = "global_environment" - container_image = "container_image" - - -class Session(BaseAPISpec): - model_config = ConfigDict( - extra="allow", - ) - name: Optional[str] = Field( - None, - description="Renku session name", - example="My Renku Session :)", - max_length=99, - min_length=1, - ) - url: Optional[str] = None + GLOBAL = "GLOBAL" + CUSTOM = "CUSTOM" class Error(BaseAPISpec): @@ -49,7 +35,7 @@ class Environment(BaseAPISpec): description="ULID identifier", max_length=26, min_length=26, - pattern="^[A-Z0-9]{26}$", + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", ) name: str = Field( ..., @@ -72,12 +58,46 @@ class Environment(BaseAPISpec): example="renku/renkulab-py:3.10-0.18.1", max_length=500, ) - default_url: Optional[str] = Field( - None, + default_url: str = Field( + ..., description="The default path to open in a session", example="/lab", max_length=200, ) + uid: int = Field( + ..., description="The user ID used to run the session", gt=0, le=65535 + ) + gid: int = Field( + ..., description="The group ID used to run the session", gt=0, le=65535 + ) + working_directory: str = Field( + ..., description="The location where the session will start", min_length=1 + ) + mount_directory: str = Field( + ..., + description="The location where the persistent storage for the session will be mounted, usually it should be identical to or a parent of the working directory", + min_length=1, + ) + port: int = Field( + ..., + description="The TCP port (on any container in the session) where user requests will be routed to from the ingress", + gt=0, + lt=65400, + ) + command: Optional[List[str]] = Field( + None, + description="The command that will be run i.e. will overwrite the image Dockerfile ENTRYPOINT, equivalent to command in Kubernetes", + min_length=1, + ) + args: Optional[List[str]] = Field( + None, + description="The arguments that will follow the command, i.e. 
will overwrite the image Dockerfile CMD, equivalent to args in Kubernetes", + min_length=1, + ) + + +class EnvironmentGetInLauncher(Environment): + environment_kind: EnvironmentKind class EnvironmentPost(BaseAPISpec): @@ -97,12 +117,44 @@ class EnvironmentPost(BaseAPISpec): example="renku/renkulab-py:3.10-0.18.1", max_length=500, ) - default_url: Optional[str] = Field( - None, + default_url: str = Field( + "/lab", description="The default path to open in a session", example="/lab", max_length=200, ) + uid: int = Field( + 1000, description="The user ID used to run the session", gt=0, le=65535 + ) + gid: int = Field( + 1000, description="The group ID used to run the session", gt=0, le=65535 + ) + working_directory: str = Field( + "/home/jovyan/work", + description="The location where the session will start", + min_length=1, + ) + mount_directory: str = Field( + "/home/jovyan/work", + description="The location where the persistent storage for the session will be mounted, usually it should be identical to or a parent of the working directory", + min_length=1, + ) + port: int = Field( + 8080, + description="The TCP port (on any container in the session) where user requests will be routed to from the ingress", + gt=0, + lt=65400, + ) + command: Optional[List[str]] = Field( + None, + description="The command that will be run i.e. will overwrite the image Dockerfile ENTRYPOINT, equivalent to command in Kubernetes", + min_length=1, + ) + args: Optional[List[str]] = Field( + None, + description="The arguments that will follow the command, i.e. will overwrite the image Dockerfile CMD, equivalent to args in Kubernetes", + min_length=1, + ) class EnvironmentPatch(BaseAPISpec): @@ -131,6 +183,36 @@ class EnvironmentPatch(BaseAPISpec): example="/lab", max_length=200, ) + uid: Optional[int] = Field( + None, description="The user ID used to run the session", gt=0, le=65535 + ) + gid: Optional[int] = Field( + None, description="The group ID used to run the session", gt=0, le=65535 + ) + working_directory: Optional[str] = Field( + None, description="The location where the session will start", min_length=1 + ) + mount_directory: Optional[str] = Field( + None, + description="The location where the persistent storage for the session will be mounted, usually it should be identical to or a parent of the working directory", + min_length=1, + ) + port: Optional[int] = Field( + None, + description="The TCP port (on any container in the session) where user requests will be routed to from the ingress", + gt=0, + lt=65400, + ) + command: Optional[List[str]] = Field( + None, + description="The command that will be run i.e. will overwrite the image Dockerfile ENTRYPOINT, equivalent to command in Kubernetes", + min_length=1, + ) + args: Optional[List[str]] = Field( + None, + description="The arguments that will follow the command, i.e. 
will overwrite the image Dockerfile CMD, equivalent to args in Kubernetes", + min_length=1, + ) class SessionLauncher(BaseAPISpec): @@ -139,14 +221,14 @@ class SessionLauncher(BaseAPISpec): description="ULID identifier", max_length=26, min_length=26, - pattern="^[A-Z0-9]{26}$", + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", ) project_id: str = Field( ..., description="ULID identifier", max_length=26, min_length=26, - pattern="^[A-Z0-9]{26}$", + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", ) name: str = Field( ..., @@ -163,27 +245,45 @@ class SessionLauncher(BaseAPISpec): description: Optional[str] = Field( None, description="A description for the resource", max_length=500 ) - environment_kind: EnvironmentKind - environment_id: Optional[str] = Field( + environment: EnvironmentGetInLauncher + resource_class_id: Optional[int] = Field( + ..., description="The identifier of a resource class" + ) + + +class EnvironmentIdOnlyPatch(BaseAPISpec): + id: Optional[str] = Field( None, description="Id of the environment to use", example="01AN4Z79ZS6XX96588FDX0H099", min_length=1, ) - resource_class_id: Optional[int] = Field( - None, description="The identifier of a resource class" - ) - container_image: Optional[str] = Field( - None, - description="A container image", - example="renku/renkulab-py:3.10-0.18.1", - max_length=500, + + +class EnvironmentIdOnlyPost(BaseAPISpec): + id: str = Field( + ..., + description="Id of the environment to use", + example="01AN4Z79ZS6XX96588FDX0H099", + min_length=1, ) - default_url: Optional[str] = Field( - None, - description="The default path to open in a session", - example="/lab", - max_length=200, + + +class EnvironmentList(RootModel[List[Environment]]): + root: List[Environment] = Field(..., description="A list of session environments") + + +class EnvironmentPostInLauncher(EnvironmentPost): + environment_kind: EnvironmentKind + + +class EnvironmentPatchInLauncher(EnvironmentPatch): + environment_kind: Optional[EnvironmentKind] = None + + +class SessionLaunchersList(RootModel[List[SessionLauncher]]): + root: List[SessionLauncher] = Field( + ..., description="A list of Renku session launchers", min_length=0 ) @@ -203,33 +303,15 @@ class SessionLauncherPost(BaseAPISpec): description="ULID identifier", max_length=26, min_length=26, - pattern="^[A-Z0-9]{26}$", + pattern="^[0-7][0-9A-HJKMNP-TV-Z]{25}$", ) description: Optional[str] = Field( None, description="A description for the resource", max_length=500 ) - environment_kind: EnvironmentKind - environment_id: Optional[str] = Field( - None, - description="Id of the environment to use", - example="01AN4Z79ZS6XX96588FDX0H099", - min_length=1, - ) resource_class_id: Optional[int] = Field( None, description="The identifier of a resource class" ) - container_image: Optional[str] = Field( - None, - description="A container image", - example="renku/renkulab-py:3.10-0.18.1", - max_length=500, - ) - default_url: Optional[str] = Field( - None, - description="The default path to open in a session", - example="/lab", - max_length=200, - ) + environment: Union[EnvironmentPostInLauncher, EnvironmentIdOnlyPost] class SessionLauncherPatch(BaseAPISpec): @@ -246,44 +328,9 @@ class SessionLauncherPatch(BaseAPISpec): description: Optional[str] = Field( None, description="A description for the resource", max_length=500 ) - environment_kind: Optional[EnvironmentKind] = None - environment_id: Optional[str] = Field( - None, - description="Id of the environment to use", - example="01AN4Z79ZS6XX96588FDX0H099", - min_length=1, - ) - resource_class_id: 
Optional[int] = Field( - None, description="The identifier of a resource class" - ) - container_image: Optional[str] = Field( - None, - description="A container image", - example="renku/renkulab-py:3.10-0.18.1", - max_length=500, - ) - default_url: Optional[str] = Field( - None, - description="The default path to open in a session", - example="/lab", - max_length=200, - ) - - -class SessionStart(BaseAPISpec): - model_config = ConfigDict( - extra="allow", - ) resource_class_id: Optional[int] = Field( None, description="The identifier of a resource class" ) - - -class EnvironmentList(RootModel[List[Environment]]): - root: List[Environment] = Field(..., description="A list of session environments") - - -class SessionLaunchersList(RootModel[List[SessionLauncher]]): - root: List[SessionLauncher] = Field( - ..., description="A list of Renku session launchers", min_length=0 + environment: Optional[Union[EnvironmentPatchInLauncher, EnvironmentIdOnlyPatch]] = ( + None ) diff --git a/components/renku_data_services/session/apispec_base.py b/components/renku_data_services/session/apispec_base.py index a16833290..d91e73fb9 100644 --- a/components/renku_data_services/session/apispec_base.py +++ b/components/renku_data_services/session/apispec_base.py @@ -1,5 +1,7 @@ """Base models for API specifications.""" +from pathlib import PurePosixPath + from pydantic import BaseModel, field_validator from ulid import ULID @@ -12,8 +14,16 @@ class Config: from_attributes = True - @field_validator("id", mode="before", check_fields=False) + @field_validator("id", "project_id", mode="before", check_fields=False) @classmethod def serialize_id(cls, id: str | ULID) -> str: """Custom serializer that can handle ULIDs.""" return str(id) + + @field_validator("working_directory", "mount_directory", check_fields=False, mode="before") + @classmethod + def convert_path_to_string(cls, val: str | PurePosixPath) -> str: + """Converts the python path to a regular string when pydantic deserializes.""" + if isinstance(val, PurePosixPath): + return val.as_posix() + return val diff --git a/components/renku_data_services/session/blueprints.py b/components/renku_data_services/session/blueprints.py index 8fe42995b..75fbf25a4 100644 --- a/components/renku_data_services/session/blueprints.py +++ b/components/renku_data_services/session/blueprints.py @@ -1,6 +1,7 @@ """Session blueprint.""" from dataclasses import dataclass +from pathlib import PurePosixPath from sanic import HTTPResponse, Request, json from sanic.response import JSONResponse @@ -10,7 +11,7 @@ import renku_data_services.base_models as base_models from renku_data_services.base_api.auth import authenticate, validate_path_project_id from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint -from renku_data_services.session import apispec +from renku_data_services.session import apispec, models from renku_data_services.session.db import SessionRepository @@ -47,7 +48,21 @@ def post(self) -> BlueprintFactoryResponse: @authenticate(self.authenticator) @validate(json=apispec.EnvironmentPost) async def _post(_: Request, user: base_models.APIUser, body: apispec.EnvironmentPost) -> JSONResponse: - environment = await self.session_repo.insert_environment(user=user, new_environment=body) + unsaved_environment = models.UnsavedEnvironment( + name=body.name, + description=body.description, + container_image=body.container_image, + default_url=body.default_url, + port=body.port, + working_directory=PurePosixPath(body.working_directory), + 
mount_directory=PurePosixPath(body.mount_directory), + uid=body.uid, + gid=body.gid, + environment_kind=models.EnvironmentKind.GLOBAL, + command=body.command, + args=body.args, + ) + environment = await self.session_repo.insert_environment(user=user, new_environment=unsaved_environment) return json(apispec.Environment.model_validate(environment).model_dump(exclude_none=True, mode="json"), 201) return "/environments", ["POST"], _post @@ -117,7 +132,32 @@ def post(self) -> BlueprintFactoryResponse: @authenticate(self.authenticator) @validate(json=apispec.SessionLauncherPost) async def _post(_: Request, user: base_models.APIUser, body: apispec.SessionLauncherPost) -> JSONResponse: - launcher = await self.session_repo.insert_launcher(user=user, new_launcher=body) + environment: str | models.UnsavedEnvironment + if isinstance(body.environment, apispec.EnvironmentIdOnlyPost): + environment = body.environment.id + else: + environment = models.UnsavedEnvironment( + name=body.environment.name, + description=body.environment.description, + container_image=body.environment.container_image, + default_url=body.environment.default_url, + port=body.environment.port, + working_directory=PurePosixPath(body.environment.working_directory), + mount_directory=PurePosixPath(body.environment.mount_directory), + uid=body.environment.uid, + gid=body.environment.gid, + environment_kind=models.EnvironmentKind(body.environment.environment_kind.value), + args=body.environment.args, + command=body.environment.command, + ) + new_launcher = models.UnsavedSessionLauncher( + project_id=ULID.from_str(body.project_id), + name=body.name, + description=body.description, + environment=environment, + resource_class_id=body.resource_class_id, + ) + launcher = await self.session_repo.insert_launcher(user=user, new_launcher=new_launcher) return json( apispec.SessionLauncher.model_validate(launcher).model_dump(exclude_none=True, mode="json"), 201 ) @@ -132,8 +172,35 @@ def patch(self) -> BlueprintFactoryResponse: async def _patch( _: Request, user: base_models.APIUser, launcher_id: ULID, body: apispec.SessionLauncherPatch ) -> JSONResponse: - body_dict = body.model_dump(exclude_none=True) - launcher = await self.session_repo.update_launcher(user=user, launcher_id=launcher_id, **body_dict) + body_dict = body.model_dump(exclude_none=True, mode="json") + async with self.session_repo.session_maker() as session, session.begin(): + current_launcher = await self.session_repo.get_launcher(user, launcher_id) + new_env: models.UnsavedEnvironment | None = None + if ( + isinstance(body.environment, apispec.EnvironmentPatchInLauncher) + and current_launcher.environment.environment_kind == models.EnvironmentKind.GLOBAL + and body.environment.environment_kind == apispec.EnvironmentKind.CUSTOM + ): + # This means that the global environment is being swapped for a custom one, + # so we have to create a brand new environment, but we have to validate here. 
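# A minimal sketch (assumed for illustration, not part of this patch) of a PATCH body that
# takes this branch: the launcher currently uses a GLOBAL environment and the request swaps
# in a CUSTOM one. Per the spec above, only name, container_image and environment_kind are
# required by EnvironmentPostInLauncher; the remaining fields fall back to their defaults.
example_swap_body = {
    "environment": {
        "environment_kind": "CUSTOM",
        "name": "My custom environment",
        "container_image": "renku/renkulab-py:3.10-0.18.1",
    }
}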
+ validated_env = apispec.EnvironmentPostInLauncher.model_validate(body_dict.pop("environment")) + new_env = models.UnsavedEnvironment( + name=validated_env.name, + description=validated_env.description, + container_image=validated_env.container_image, + default_url=validated_env.default_url, + port=validated_env.port, + working_directory=PurePosixPath(validated_env.working_directory), + mount_directory=PurePosixPath(validated_env.mount_directory), + uid=validated_env.uid, + gid=validated_env.gid, + environment_kind=models.EnvironmentKind(validated_env.environment_kind.value), + args=validated_env.args, + command=validated_env.command, + ) + launcher = await self.session_repo.update_launcher( + user=user, launcher_id=launcher_id, new_custom_environment=new_env, session=session, **body_dict + ) return json(apispec.SessionLauncher.model_validate(launcher).model_dump(exclude_none=True, mode="json")) return "/session_launchers/", ["PATCH"], _patch diff --git a/components/renku_data_services/session/db.py b/components/renku_data_services/session/db.py index 06f9c89b7..417820e40 100644 --- a/components/renku_data_services/session/db.py +++ b/components/renku_data_services/session/db.py @@ -3,6 +3,7 @@ from __future__ import annotations from collections.abc import Callable +from contextlib import AbstractAsyncContextManager, nullcontext from datetime import UTC, datetime from typing import Any @@ -15,9 +16,8 @@ from renku_data_services.authz.authz import Authz, ResourceType from renku_data_services.authz.models import Scope from renku_data_services.crc.db import ResourcePoolRepository -from renku_data_services.session import apispec, models +from renku_data_services.session import models from renku_data_services.session import orm as schemas -from renku_data_services.session.apispec import EnvironmentKind class SessionRepository: @@ -31,17 +31,23 @@ def __init__( self.resource_pools: ResourcePoolRepository = resource_pools async def get_environments(self) -> list[models.Environment]: - """Get all session environments from the database.""" + """Get all global session environments from the database.""" async with self.session_maker() as session: - res = await session.scalars(select(schemas.EnvironmentORM)) + res = await session.scalars( + select(schemas.EnvironmentORM).where( + schemas.EnvironmentORM.environment_kind == models.EnvironmentKind.GLOBAL.value + ) + ) environments = res.all() return [e.dump() for e in environments] async def get_environment(self, environment_id: ULID) -> models.Environment: - """Get one session environment from the database.""" + """Get one global session environment from the database.""" async with self.session_maker() as session: res = await session.scalars( - select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == str(environment_id)) + select(schemas.EnvironmentORM) + .where(schemas.EnvironmentORM.id == str(environment_id)) + .where(schemas.EnvironmentORM.environment_kind == models.EnvironmentKind.GLOBAL.value) ) environment = res.one_or_none() if environment is None: @@ -50,64 +56,109 @@ async def get_environment(self, environment_id: ULID) -> models.Environment: ) return environment.dump() - async def insert_environment( + async def __insert_environment( self, user: base_models.APIUser, - new_environment: apispec.EnvironmentPost, - ) -> models.Environment: - """Insert a new session environment.""" + session: AsyncSession, + new_environment: models.UnsavedEnvironment, + ) -> schemas.EnvironmentORM: if user.id is None: - raise 
errors.UnauthorizedError(message="You do not have the required permissions for this operation.") - if not user.is_admin: - raise errors.ForbiddenError(message="You do not have the required permissions for this operation.") - - environment_model = models.Environment( - id=None, + raise errors.UnauthorizedError( + message="You have to be authenticated to insert an environment in the DB.", quiet=True + ) + environment = schemas.EnvironmentORM( name=new_environment.name, + created_by_id=user.id, + creation_date=datetime.now(UTC), description=new_environment.description, container_image=new_environment.container_image, default_url=new_environment.default_url, - created_by=models.Member(id=user.id), - creation_date=datetime.now(UTC).replace(microsecond=0), + port=new_environment.port, + working_directory=new_environment.working_directory, + mount_directory=new_environment.mount_directory, + uid=new_environment.uid, + gid=new_environment.gid, + environment_kind=new_environment.environment_kind, + command=new_environment.command, + args=new_environment.args, ) - environment = schemas.EnvironmentORM.load(environment_model) + + session.add(environment) + return environment + + async def insert_environment( + self, user: base_models.APIUser, new_environment: models.UnsavedEnvironment + ) -> models.Environment: + """Insert a new global session environment.""" + if user.id is None or not user.is_admin: + raise errors.UnauthorizedError( + message="You do not have the required permissions for this operation.", quiet=True + ) + if new_environment.environment_kind != models.EnvironmentKind.GLOBAL: + raise errors.ValidationError(message="This endpoint only supports adding global environments", quiet=True) async with self.session_maker() as session, session.begin(): - session.add(environment) - return environment.dump() + env = await self.__insert_environment(user, session, new_environment) + return env.dump() + + async def __update_environment( + self, + user: base_models.APIUser, + session: AsyncSession, + environment_id: ULID, + kind: models.EnvironmentKind, + **kwargs: dict, + ) -> models.Environment: + res = await session.scalars( + select(schemas.EnvironmentORM) + .where(schemas.EnvironmentORM.id == str(environment_id)) + .where(schemas.EnvironmentORM.environment_kind == kind.value) + ) + environment = res.one_or_none() + if environment is None: + raise errors.MissingResourceError(message=f"Session environment with id '{environment_id}' does not exist.") + + for key, value in kwargs.items(): + # NOTE: Only some fields can be edited + if key in [ + "name", + "description", + "container_image", + "default_url", + "port", + "working_directory", + "mount_directory", + "uid", + "gid", + "args", + "command", + ]: + setattr(environment, key, value) + + return environment.dump() async def update_environment( self, user: base_models.APIUser, environment_id: ULID, **kwargs: dict ) -> models.Environment: - """Update a session environment entry.""" + """Update a global session environment entry.""" if not user.is_admin: - raise errors.ForbiddenError(message="You do not have the required permissions for this operation.") + raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") async with self.session_maker() as session, session.begin(): - res = await session.scalars( - select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == str(environment_id)) + return await self.__update_environment( + user, session, environment_id, models.EnvironmentKind.GLOBAL, **kwargs 
) - environment = res.one_or_none() - if environment is None: - raise errors.MissingResourceError( - message=f"Session environment with id '{environment_id}' does not exist." - ) - - for key, value in kwargs.items(): - # NOTE: Only ``name``, ``description``, ``container_image`` and ``default_url`` can be edited - if key in ["name", "description", "container_image", "default_url"]: - setattr(environment, key, value) - - return environment.dump() async def delete_environment(self, user: base_models.APIUser, environment_id: ULID) -> None: - """Delete a session environment entry.""" + """Delete a global session environment entry.""" if not user.is_admin: raise errors.ForbiddenError(message="You do not have the required permissions for this operation.") async with self.session_maker() as session, session.begin(): res = await session.scalars( - select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == str(environment_id)) + select(schemas.EnvironmentORM) + .where(schemas.EnvironmentORM.id == str(environment_id)) + .where(schemas.EnvironmentORM.environment_kind == models.EnvironmentKind.GLOBAL.value) ) environment = res.one_or_none() @@ -171,37 +222,19 @@ async def get_launcher(self, user: base_models.APIUser, launcher_id: ULID) -> mo return launcher.dump() async def insert_launcher( - self, user: base_models.APIUser, new_launcher: apispec.SessionLauncherPost + self, user: base_models.APIUser, new_launcher: models.UnsavedSessionLauncher ) -> models.SessionLauncher: """Insert a new session launcher.""" if not user.is_authenticated or user.id is None: raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") project_id = new_launcher.project_id - authorized = await self.project_authz.has_permission( - user, ResourceType.project, ULID.from_str(project_id), Scope.WRITE - ) + authorized = await self.project_authz.has_permission(user, ResourceType.project, project_id, Scope.WRITE) if not authorized: raise errors.MissingResourceError( message=f"Project with id '{project_id}' does not exist or you do not have access to it." ) - launcher_model = models.SessionLauncher( - id=None, - name=new_launcher.name, - project_id=new_launcher.project_id, - description=new_launcher.description, - environment_kind=new_launcher.environment_kind, - environment_id=new_launcher.environment_id, - resource_class_id=new_launcher.resource_class_id, - container_image=new_launcher.container_image, - default_url=new_launcher.default_url, - created_by=models.Member(id=user.id), - creation_date=datetime.now(UTC).replace(microsecond=0), - ) - - models.SessionLauncher.model_validate(launcher_model) - async with self.session_maker() as session, session.begin(): res = await session.scalars(select(schemas.ProjectORM).where(schemas.ProjectORM.id == project_id)) project = res.one_or_none() @@ -210,16 +243,26 @@ async def insert_launcher( message=f"Project with id '{project_id}' does not exist or you do not have access to it." 
) - environment_id = new_launcher.environment_id - if environment_id is not None: - res = await session.scalars( - select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == environment_id) + environment_id: ULID + environment: models.Environment + environment_orm: schemas.EnvironmentORM | None + if isinstance(new_launcher.environment, models.UnsavedEnvironment): + environment_orm = await self.__insert_environment(user, session, new_launcher.environment) + environment = environment_orm.dump() + environment_id = environment.id + else: + environment_id = ULID.from_str(new_launcher.environment) + res_env = await session.scalars( + select(schemas.EnvironmentORM) + .where(schemas.EnvironmentORM.id == environment_id) + .where(schemas.EnvironmentORM.environment_kind == models.EnvironmentKind.GLOBAL.value) ) - environment = res.one_or_none() - if environment is None: + environment_orm = res_env.one_or_none() + if environment_orm is None: raise errors.MissingResourceError( message=f"Session environment with id '{environment_id}' does not exist or you do not have access to it." # noqa: E501 ) + environment = environment_orm.dump() resource_class_id = new_launcher.resource_class_id if resource_class_id is not None: @@ -239,25 +282,49 @@ async def insert_launcher( message=f"You do not have access to resource class with id '{resource_class_id}'." ) - launcher = schemas.SessionLauncherORM.load(launcher_model) + launcher = schemas.SessionLauncherORM( + name=new_launcher.name, + created_by_id=user.id, + creation_date=datetime.now(UTC), + description=new_launcher.description, + project_id=new_launcher.project_id, + environment_id=environment_id, + resource_class_id=new_launcher.resource_class_id, + ) session.add(launcher) + await session.flush() + await session.refresh(launcher) return launcher.dump() async def update_launcher( - self, user: base_models.APIUser, launcher_id: ULID, **kwargs: Any + self, + user: base_models.APIUser, + launcher_id: ULID, + new_custom_environment: models.UnsavedEnvironment | None, + session: AsyncSession | None = None, + **kwargs: Any, ) -> models.SessionLauncher: """Update a session launcher entry.""" if not user.is_authenticated or user.id is None: raise errors.UnauthorizedError(message="You do not have the required permissions for this operation.") - async with self.session_maker() as session, session.begin(): + session_ctx: AbstractAsyncContextManager = nullcontext() + tx: AbstractAsyncContextManager = nullcontext() + if not session: + session = self.session_maker() + session_ctx = session + if not session.in_transaction(): + tx = session.begin() + + async with session_ctx, tx: res = await session.scalars( select(schemas.SessionLauncherORM).where(schemas.SessionLauncherORM.id == launcher_id) ) launcher = res.one_or_none() if launcher is None: raise errors.MissingResourceError( - message=f"Session launcher with id '{launcher_id}' does not exist or you do not have access to it." # noqa: E501 + message=f"Session launcher with id '{launcher_id}' does not " + "exist or you do not have access to it." 
) authorized = await self.project_authz.has_permission( @@ -269,17 +336,6 @@ async def update_launcher( if not authorized: raise errors.ForbiddenError(message="You do not have the required permissions for this operation.") - environment_id = kwargs.get("environment_id") - if environment_id is not None: - res = await session.scalars( - select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == environment_id) - ) - environment = res.one_or_none() - if environment is None: - raise errors.MissingResourceError( - message=f"Session environment with id '{environment_id}' does not exist or you do not have access to it." # noqa: E501 - ) - resource_class_id = kwargs.get("resource_class_id") if resource_class_id is not None: res = await session.scalars( @@ -299,29 +355,86 @@ async def update_launcher( ) for key, value in kwargs.items(): - # NOTE: Only ``name``, ``description``, ``environment_kind``, - # ``environment_id``, ``resource_class_id``, ``container_image`` and - # ``default_url`` can be edited. + # NOTE: Only some fields can be updated. if key in [ "name", "description", - "environment_kind", - "environment_id", "resource_class_id", - "container_image", - "default_url", ]: setattr(launcher, key, value) - if launcher.environment_kind == EnvironmentKind.global_environment: - launcher.container_image = None - if launcher.environment_kind == EnvironmentKind.container_image: - launcher.environment = None - - launcher_model = launcher.dump() - models.SessionLauncher.model_validate(launcher_model) + env_payload = kwargs.get("environment", {}) + await self.__update_launcher_environment(user, launcher, session, new_custom_environment, **env_payload) + return launcher.dump() - return launcher_model + async def __update_launcher_environment( + self, + user: base_models.APIUser, + launcher: schemas.SessionLauncherORM, + session: AsyncSession, + new_custom_environment: models.UnsavedEnvironment | None, + **kwargs: Any, + ) -> None: + current_env_kind = launcher.environment.environment_kind + match new_custom_environment, current_env_kind, kwargs: + case None, _, {"id": env_id, **nothing_else} if len(nothing_else) == 0: + # The environment in the launcher is set via ID, the new ID has to refer + # to an environment that is GLOBAL. + old_environment = launcher.environment + new_environment_id = ULID.from_str(env_id) + res_env = await session.scalars( + select(schemas.EnvironmentORM).where(schemas.EnvironmentORM.id == new_environment_id) + ) + new_environment = res_env.one_or_none() + if new_environment is None: + raise errors.MissingResourceError( + message=f"Session environment with id '{new_environment_id}' does not exist or " + "you do not have access to it." + ) + if new_environment.environment_kind != models.EnvironmentKind.GLOBAL: + raise errors.ValidationError( + message="Cannot set the environment for a launcher to an existing environment if that " + "existing environment is not global", + quiet=True, + ) + launcher.environment_id = new_environment_id + launcher.environment = new_environment + if old_environment.environment_kind == models.EnvironmentKind.CUSTOM: + # A custom environment exists but it is being updated to a global one + # We remove the custom environment to avoid accumulating custom environments that are not associated + # with any launchers. 
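# A minimal sketch (assumed for illustration, not part of this patch) of the id-only
# payload that reaches this case; the referenced environment must already exist and be
# GLOBAL, otherwise the checks above raise:
example_id_only_patch = {"environment": {"id": "01AN4Z79ZS6XX96588FDX0H099"}}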
+ await session.delete(old_environment) + case None, models.EnvironmentKind.CUSTOM, {**rest} if ( + rest.get("environment_kind") is None + or rest.get("environment_kind") == models.EnvironmentKind.CUSTOM.value + ): + # Custom environment being updated + for key, val in rest.items(): + # NOTE: Only some fields can be updated. + if key in [ + "name", + "description", + "container_image", + "default_url", + "port", + "working_directory", + "mount_directory", + "uid", + "gid", + "args", + "command", + ]: + setattr(launcher.environment, key, val) + case models.UnsavedEnvironment(), models.EnvironmentKind.GLOBAL, {**nothing_else} if ( + len(nothing_else) == 0 and new_custom_environment.environment_kind == models.EnvironmentKind.CUSTOM + ): + # Global environment replaced by a custom one + new_env = await self.__insert_environment(user, session, new_custom_environment) + launcher.environment = new_env + case _: + raise errors.ValidationError( + message="Encountered an invalid payload for updating a launcher environment", quiet=True + ) async def delete_launcher(self, user: base_models.APIUser, launcher_id: ULID) -> None: """Delete a session launcher entry.""" @@ -347,3 +460,5 @@ async def delete_launcher(self, user: base_models.APIUser, launcher_id: ULID) -> raise errors.ForbiddenError(message="You do not have the required permissions for this operation.") await session.delete(launcher) + if launcher.environment.environment_kind == models.EnvironmentKind.CUSTOM: + await session.delete(launcher.environment) diff --git a/components/renku_data_services/session/models.py b/components/renku_data_services/session/models.py index 422524152..6dcff46c2 100644 --- a/components/renku_data_services/session/models.py +++ b/components/renku_data_services/session/models.py @@ -2,62 +2,100 @@ from dataclasses import dataclass from datetime import datetime +from enum import StrEnum +from pathlib import PurePosixPath -from pydantic import BaseModel, model_validator from ulid import ULID from renku_data_services import errors -from renku_data_services.session.apispec import EnvironmentKind -@dataclass(frozen=True, eq=True, kw_only=True) -class Member(BaseModel): - """Member model.""" +class EnvironmentKind(StrEnum): + """The type of environment.""" - id: str + GLOBAL: str = "GLOBAL" + CUSTOM: str = "CUSTOM" -@dataclass(frozen=True, eq=True, kw_only=True) -class Environment(BaseModel): - """Session environment model.""" +@dataclass(kw_only=True, frozen=True, eq=True) +class BaseEnvironment: + """Base session environment model.""" - id: str | None name: str - creation_date: datetime description: str | None container_image: str - default_url: str | None - created_by: Member + default_url: str + port: int + working_directory: PurePosixPath + mount_directory: PurePosixPath + uid: int + gid: int + environment_kind: EnvironmentKind + args: list[str] | None = None + command: list[str] | None = None + + +@dataclass(kw_only=True, frozen=True, eq=True) +class UnsavedEnvironment(BaseEnvironment): + """Session environment model that has not been saved.""" + + port: int = 8888 + description: str | None = None + working_directory: PurePosixPath = PurePosixPath("/home/jovyan/work") + mount_directory: PurePosixPath = PurePosixPath("/home/jovyan/work") + uid: int = 1000 + gid: int = 1000 + + def __post_init__(self) -> None: + if not self.working_directory.is_absolute(): + raise errors.ValidationError(message="The working directory for a session is supposed to be absolute") + if not self.mount_directory.is_absolute(): + raise 
errors.ValidationError(message="The mount directory for a session is supposed to be absolute") + if self.working_directory.is_reserved(): + raise errors.ValidationError( + message="The requested value for the working directory is reserved by the OS and cannot be used." + ) + if self.mount_directory.is_reserved(): + raise errors.ValidationError( + message="The requested value for the mount directory is reserved by the OS and cannot be used." + ) + + +@dataclass(kw_only=True, frozen=True, eq=True) +class Environment(BaseEnvironment): + """Session environment model that has been saved in the DB.""" + + id: ULID + creation_date: datetime + created_by: str @dataclass(frozen=True, eq=True, kw_only=True) -class SessionLauncher(BaseModel): +class BaseSessionLauncher: """Session launcher model.""" id: ULID | None - project_id: str + project_id: ULID name: str - creation_date: datetime description: str | None - environment_kind: EnvironmentKind - environment_id: str | None + environment: str | UnsavedEnvironment | Environment resource_class_id: int | None - container_image: str | None - default_url: str | None - created_by: Member - @model_validator(mode="after") - def check_launcher_environment_kind(self) -> "SessionLauncher": - """Validates the environment of a launcher.""" - environment_kind = self.environment_kind - environment_id = self.environment_id - container_image = self.container_image +@dataclass(frozen=True, eq=True, kw_only=True) +class UnsavedSessionLauncher(BaseSessionLauncher): + """Session launcher model that has not been persisted in the DB.""" - if environment_kind == EnvironmentKind.global_environment and environment_id is None: - raise errors.ValidationError(message="'environment_id' not set when environment_kind=global_environment") + id: ULID | None = None + environment: str | UnsavedEnvironment + """When a string is passed for the environment it should be the ID of an existing environment.""" - if environment_kind == EnvironmentKind.container_image and container_image is None: - raise errors.ValidationError(message="'container_image' not set when environment_kind=container_image") - return self +@dataclass(frozen=True, eq=True, kw_only=True) +class SessionLauncher(BaseSessionLauncher): + """Session launcher model that has been already saved in the DB.""" + + id: ULID + creation_date: datetime + created_by: str + environment: Environment diff --git a/components/renku_data_services/session/orm.py b/components/renku_data_services/session/orm.py index 4b61d548c..2a7cc855d 100644 --- a/components/renku_data_services/session/orm.py +++ b/components/renku_data_services/session/orm.py @@ -1,8 +1,10 @@ """SQLAlchemy's schemas for the sessions database.""" from datetime import datetime +from pathlib import PurePosixPath -from sqlalchemy import DateTime, MetaData, String +from sqlalchemy import JSON, DateTime, MetaData, String +from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column, relationship from sqlalchemy.schema import ForeignKey from ulid import ULID @@ -10,10 +12,10 @@ from renku_data_services.crc.orm import ResourceClassORM from renku_data_services.project.orm import ProjectORM from renku_data_services.session import models -from renku_data_services.session.apispec import EnvironmentKind -from renku_data_services.utils.sqlalchemy import ULIDType +from renku_data_services.utils.sqlalchemy import PurePosixPathType, ULIDType metadata_obj = MetaData(schema="sessions") # Has to match alembic ini section 
name +JSONVariant = JSON().with_variant(JSONB(), "postgresql") class BaseORM(MappedAsDataclass, DeclarativeBase): @@ -27,7 +29,7 @@ class EnvironmentORM(BaseORM): __tablename__ = "environments" - id: Mapped[str] = mapped_column("id", String(26), primary_key=True, default_factory=lambda: str(ULID()), init=False) + id: Mapped[ULID] = mapped_column("id", ULIDType, primary_key=True, default_factory=lambda: str(ULID()), init=False) """Id of this session environment object.""" name: Mapped[str] = mapped_column("name", String(99)) @@ -45,31 +47,36 @@ class EnvironmentORM(BaseORM): container_image: Mapped[str] = mapped_column("container_image", String(500)) """Container image repository and tag.""" - default_url: Mapped[str | None] = mapped_column("default_url", String(200)) + default_url: Mapped[str] = mapped_column("default_url", String(200)) """Default URL path to open in a session.""" - @classmethod - def load(cls, environment: models.Environment) -> "EnvironmentORM": - """Create EnvironmentORM from the session environment model.""" - return cls( - name=environment.name, - created_by_id=environment.created_by.id, - creation_date=environment.creation_date, - description=environment.description, - container_image=environment.container_image, - default_url=environment.default_url, - ) + port: Mapped[int] = mapped_column("port") + working_directory: Mapped[PurePosixPath] = mapped_column("working_directory", PurePosixPathType) + mount_directory: Mapped[PurePosixPath] = mapped_column("mount_directory", PurePosixPathType) + uid: Mapped[int] = mapped_column("uid") + gid: Mapped[int] = mapped_column("gid") + environment_kind: Mapped[models.EnvironmentKind] = mapped_column("environment_kind") + args: Mapped[list[str] | None] = mapped_column("args", JSONVariant, nullable=True) + command: Mapped[list[str] | None] = mapped_column("command", JSONVariant, nullable=True) def dump(self) -> models.Environment: """Create a session environment model from the EnvironmentORM.""" return models.Environment( id=self.id, name=self.name, - created_by=models.Member(id=self.created_by_id), + created_by=self.created_by_id, creation_date=self.creation_date, description=self.description, container_image=self.container_image, default_url=self.default_url, + gid=self.gid, + uid=self.uid, + environment_kind=self.environment_kind, + mount_directory=self.mount_directory, + working_directory=self.working_directory, + port=self.port, + args=self.args, + command=self.command, ) @@ -93,24 +100,15 @@ class SessionLauncherORM(BaseORM): description: Mapped[str | None] = mapped_column("description", String(500)) """Human-readable description of the session launcher.""" - environment_kind: Mapped[EnvironmentKind] - """The kind of environment definition to use.""" - - container_image: Mapped[str | None] = mapped_column("container_image", String(500)) - """Container image repository and tag.""" - - default_url: Mapped[str | None] = mapped_column("default_url", String(200)) - """Default URL path to open in a session.""" - project: Mapped[ProjectORM] = relationship(init=False) - environment: Mapped[EnvironmentORM | None] = relationship(init=False) + environment: Mapped[EnvironmentORM] = relationship(init=False, lazy="joined") project_id: Mapped[ULID] = mapped_column( "project_id", ForeignKey(ProjectORM.id, ondelete="CASCADE"), default=None, index=True ) """Id of the project this session belongs to.""" - environment_id: Mapped[str | None] = mapped_column( + environment_id: Mapped[ULID] = mapped_column( "environment_id", 
ForeignKey(EnvironmentORM.id), default=None, nullable=True, index=True ) """Id of the session environment.""" @@ -129,29 +127,23 @@ def load(cls, launcher: models.SessionLauncher) -> "SessionLauncherORM": """Create SessionLauncherORM from the session launcher model.""" return cls( name=launcher.name, - created_by_id=launcher.created_by.id, + created_by_id=launcher.created_by, creation_date=launcher.creation_date, description=launcher.description, - environment_kind=launcher.environment_kind, - container_image=launcher.container_image, project_id=ULID.from_str(launcher.project_id), - environment_id=launcher.environment_id, + environment_id=launcher.environment.id, resource_class_id=launcher.resource_class_id, - default_url=launcher.default_url, ) def dump(self) -> models.SessionLauncher: """Create a session launcher model from the SessionLauncherORM.""" return models.SessionLauncher( id=self.id, - project_id=str(self.project_id), + project_id=self.project_id, name=self.name, - created_by=models.Member(id=self.created_by_id), + created_by=self.created_by_id, creation_date=self.creation_date, description=self.description, - environment_kind=self.environment_kind, - environment_id=self.environment_id if self.environment_id is not None else None, - resource_class_id=self.resource_class_id if self.resource_class_id is not None else None, - container_image=self.container_image, - default_url=self.default_url, + environment=self.environment.dump(), + resource_class_id=self.resource_class_id, ) diff --git a/components/renku_data_services/storage/api.spec.yaml b/components/renku_data_services/storage/api.spec.yaml index 4c24ee653..c8485daf7 100644 --- a/components/renku_data_services/storage/api.spec.yaml +++ b/components/renku_data_services/storage/api.spec.yaml @@ -1,4 +1,3 @@ ---- openapi: 3.0.2 info: title: Renku Data Services API @@ -10,167 +9,6 @@ servers: - url: /api/data - url: /ui-server/api/data paths: - /storages_v2/{storage_id}: - parameters: - - in: path - name: storage_id - required: true - schema: - $ref: "#/components/schemas/UlidId" - description: the id of the storage - get: - summary: get cloud storage details - responses: - "200": - description: Found the cloud storage - content: - "application/json": - schema: - $ref: "#/components/schemas/CloudStorageGetV2" - default: - $ref: '#/components/responses/Error' - tags: - - storages_v2 - patch: - summary: partially update a cloud storage entry - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CloudStoragePatch" - responses: - "201": - description: The cloud storage entry was updated - content: - "application/json": - schema: - $ref: "#/components/schemas/CloudStorageGet" - default: - $ref: '#/components/responses/Error' - tags: - - storages_v2 - delete: - summary: remove a cloud storage definition - responses: - "204": - description: The rcloud storage was removed or did not exist in the first place - default: - $ref: '#/components/responses/Error' - tags: - - storages_v2 - /storages_v2: - get: - summary: get cloud storage for a project by id - parameters: - - in: query - description: query parameters - name: storage_v2_params - style: form - explode: true - schema: - type: object - additionalProperties: false - properties: - project_id: - $ref: "#/components/schemas/UlidId" - required: - - project_id - responses: - "200": - description: the storage configurations for the project - content: - "application/json": - schema: - type: array - items: - $ref: 
"#/components/schemas/CloudStorageGetV2" - "404": - description: Storage was not found - content: - "application/json": - schema: - $ref: "#/components/schemas/ErrorResponse" - default: - $ref: '#/components/responses/Error' - tags: - - storages_v2 - post: - summary: create a new cloud storage for a project - requestBody: - required: true - content: - application/json: - schema: - oneOf: - - $ref: "#/components/schemas/CloudStorage" - - $ref: "#/components/schemas/CloudStorageUrl" - responses: - "201": - description: The cloud storage entry was created - content: - "application/json": - schema: - $ref: "#/components/schemas/CloudStorageGet" - default: - $ref: '#/components/responses/Error' - tags: - - storages_v2 - /storages_v2/{storage_id}/secrets: - parameters: - - in: path - name: storage_id - required: true - schema: - $ref: "#/components/schemas/UlidId" - description: The id of the storage - get: - summary: Get all saved secrets for a cloud storage - responses: - "200": - description: The saved storage secrets - content: - "application/json": - schema: - $ref: "#/components/schemas/CloudStorageSecretGetList" - "404": - description: Storage was not found - content: - "application/json": - schema: - $ref: "#/components/schemas/ErrorResponse" - default: - $ref: "#/components/responses/Error" - tags: - - storages_v2 - post: - summary: Save secrets for a cloud storage - requestBody: - required: true - content: - application/json: - schema: - $ref: "#/components/schemas/CloudStorageSecretPostList" - responses: - "201": - description: The secrets for cloud storage were saved - content: - "application/json": - schema: - $ref: "#/components/schemas/CloudStorageSecretGetList" - default: - $ref: "#/components/responses/Error" - tags: - - storages_v2 - delete: - summary: Remove all saved secrets for a storage - responses: - "204": - description: The secrets were removed or did not exist in the first place or the storage doesn't exist - default: - $ref: "#/components/responses/Error" - tags: - - storages_v2 /storage/{storage_id}: parameters: - in: path @@ -495,59 +333,6 @@ components: $ref: "#/components/schemas/RCloneOption" required: - storage - CloudStorageGetV2: - description: Get response for a V2 cloud storage. Contains field name and secret ID for saved secrets. 
- allOf: - - $ref: "#/components/schemas/CloudStorageGet" - - type: object - properties: - secrets: - type: array - items: - $ref: "#/components/schemas/CloudStorageSecretGet" - CloudStorageSecretPost: - type: object - description: Data for storing secret for a storage field - properties: - name: - type: string - description: Name of the field to store credential for - minLength: 1 - maxLength: 99 - value: - $ref: "#/components/schemas/SecretValue" - required: - - name - - value - CloudStorageSecretPostList: - description: List of storage secrets that are saved - type: array - items: - $ref: "#/components/schemas/CloudStorageSecretPost" - CloudStorageSecretGetList: - description: List of storage secrets that are saved - type: array - items: - $ref: "#/components/schemas/CloudStorageSecretGet" - CloudStorageSecretGet: - type: object - description: Data for saved storage secrets - properties: - name: - type: string - description: Name of the field to store credential for - minLength: 1 - maxLength: 99 - secret_id: - $ref: "#/components/schemas/UlidId" - required: - - name - - secret_id - SecretValue: - description: Secret value that can be any text - type: string - minLength: 1 - maxLength: 5000 RCloneSchema: description: List of RClone schemas for different storage types type: array diff --git a/components/renku_data_services/storage/apispec.py b/components/renku_data_services/storage/apispec.py index d72e04dc9..b72a9163f 100644 --- a/components/renku_data_services/storage/apispec.py +++ b/components/renku_data_services/storage/apispec.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: api.spec.yaml -# timestamp: 2024-08-13T13:29:46+00:00 +# timestamp: 2024-09-04T09:46:37+00:00 from __future__ import annotations @@ -101,23 +101,6 @@ class ErrorResponse(BaseAPISpec): error: Error -class StorageV2Params(BaseAPISpec): - model_config = ConfigDict( - extra="forbid", - ) - project_id: str = Field( - ..., - description="ULID identifier of an object", - max_length=26, - min_length=26, - pattern="^[A-Z0-9]+$", - ) - - -class StoragesV2GetParametersQuery(BaseAPISpec): - storage_v2_params: Optional[StorageV2Params] = None - - class StorageParams(BaseAPISpec): model_config = ConfigDict( extra="forbid", @@ -235,43 +218,6 @@ class CloudStorageGet(BaseAPISpec): sensitive_fields: Optional[List[RCloneOption]] = None -class CloudStorageSecretPost(BaseAPISpec): - name: str = Field( - ..., - description="Name of the field to store credential for", - max_length=99, - min_length=1, - ) - value: str = Field( - ..., - description="Secret value that can be any text", - max_length=5000, - min_length=1, - ) - - -class CloudStorageSecretPostList(RootModel[List[CloudStorageSecretPost]]): - root: List[CloudStorageSecretPost] = Field( - ..., description="List of storage secrets that are saved" - ) - - -class CloudStorageSecretGet(BaseAPISpec): - name: str = Field( - ..., - description="Name of the field to store credential for", - max_length=99, - min_length=1, - ) - secret_id: str = Field( - ..., - description="ULID identifier of an object", - max_length=26, - min_length=26, - pattern="^[A-Z0-9]+$", - ) - - class RCloneEntry(BaseAPISpec): name: Optional[str] = Field(None, description="Human readable name of the provider") description: Optional[str] = Field(None, description="description of the provider") @@ -283,10 +229,6 @@ class RCloneEntry(BaseAPISpec): ) -class StoragesV2PostRequest(RootModel[Union[CloudStorage, CloudStorageUrl]]): - root: Union[CloudStorage, CloudStorageUrl] - - class 
StorageStorageIdPutRequest(RootModel[Union[CloudStorage, CloudStorageUrl]]): root: Union[CloudStorage, CloudStorageUrl] @@ -303,21 +245,7 @@ class StoragePostRequest(RootModel[Union[CloudStorage, CloudStorageUrl]]): root: Union[CloudStorage, CloudStorageUrl] -class CloudStorageGetV2(CloudStorageGet): - secrets: Optional[List[CloudStorageSecretGet]] = None - - -class CloudStorageSecretGetList(RootModel[List[CloudStorageSecretGet]]): - root: List[CloudStorageSecretGet] = Field( - ..., description="List of storage secrets that are saved" - ) - - class RCloneSchema(RootModel[List[RCloneEntry]]): root: List[RCloneEntry] = Field( ..., description="List of RClone schemas for different storage types" ) - - -class StoragesV2GetResponse(RootModel[List[CloudStorageGetV2]]): - root: List[CloudStorageGetV2] diff --git a/components/renku_data_services/storage/blueprints.py b/components/renku_data_services/storage/blueprints.py index 5fb3c4b38..4ae20b1d1 100644 --- a/components/renku_data_services/storage/blueprints.py +++ b/components/renku_data_services/storage/blueprints.py @@ -14,7 +14,7 @@ from renku_data_services.base_api.blueprint import BlueprintFactoryResponse, CustomBlueprint from renku_data_services.base_api.misc import validate_query from renku_data_services.storage import apispec, models -from renku_data_services.storage.db import StorageRepository, StorageV2Repository +from renku_data_services.storage.db import StorageRepository from renku_data_services.storage.rclone import RCloneValidator @@ -28,15 +28,6 @@ def dump_storage_with_sensitive_fields(storage: models.CloudStorage, validator: ).model_dump(exclude_none=True) -def dump_storage_with_sensitive_fields_and_secrets( - storage: models.CloudStorage, validator: RCloneValidator -) -> dict[str, Any]: - """Dump a CloudStorage model alongside sensitive fields and its saved secrets.""" - dumped_storage = dump_storage_with_sensitive_fields(storage, validator) - dumped_storage["secrets"] = [apispec.CloudStorageSecretGet.model_validate(s).model_dump() for s in storage.secrets] - return dumped_storage - - @dataclass(kw_only=True) class StorageBP(CustomBlueprint): """Handlers for manipulating storage definitions.""" @@ -55,7 +46,6 @@ async def _get( validator: RCloneValidator, query: apispec.StorageParams, ) -> JSONResponse: - storage: list[models.CloudStorage] storage = await self.storage_repo.get_storage(user=user, project_id=query.project_id) return json([dump_storage_with_sensitive_fields(s, validator) for s in storage]) @@ -184,160 +174,6 @@ async def _delete(request: Request, user: base_models.APIUser, storage_id: ULID) return "/storage/", ["DELETE"], _delete -@dataclass(kw_only=True) -class StoragesV2BP(CustomBlueprint): - """Handlers for manipulating storage definitions.""" - - storage_v2_repo: StorageV2Repository - authenticator: base_models.Authenticator - - def get(self) -> BlueprintFactoryResponse: - """Get cloud storage for a repository.""" - - @authenticate(self.authenticator) - @validate_query(query=apispec.StorageV2Params) - async def _get( - request: Request, - user: base_models.APIUser, - validator: RCloneValidator, - query: apispec.StorageV2Params, - ) -> JSONResponse: - storage: list[models.CloudStorage] - storage = await self.storage_v2_repo.get_storage( - user=user, include_secrets=True, project_id=query.project_id - ) - - return json([dump_storage_with_sensitive_fields_and_secrets(s, validator) for s in storage]) - - return "/storages_v2", ["GET"], _get - - def get_one(self) -> BlueprintFactoryResponse: - """Get a single 
storage by id.""" - - @authenticate(self.authenticator) - async def _get_one( - request: Request, - user: base_models.APIUser, - storage_id: ULID, - validator: RCloneValidator, - ) -> JSONResponse: - storage = await self.storage_v2_repo.get_storage_by_id(storage_id, user=user) - - return json(dump_storage_with_sensitive_fields_and_secrets(storage, validator)) - - return "/storages_v2/", ["GET"], _get_one - - def post(self) -> BlueprintFactoryResponse: - """Create a new cloud storage entry.""" - - @authenticate(self.authenticator) - async def _post(request: Request, user: base_models.APIUser, validator: RCloneValidator) -> JSONResponse: - storage: models.UnsavedCloudStorage - if not isinstance(request.json, dict): - body_type = type(request.json) - raise errors.ValidationError( - message=f"The payload is supposed to be a dictionary, got {body_type.__name__}" - ) - if "storage_url" in request.json: - url_body = apispec.CloudStorageUrl(**request.json) - storage = models.UnsavedCloudStorage.from_url( - storage_url=url_body.storage_url, - project_id=url_body.project_id.root, - name=url_body.name, - target_path=url_body.target_path, - readonly=url_body.readonly, - ) - else: - body = apispec.CloudStorage(**request.json) - storage = models.UnsavedCloudStorage.from_dict(body.model_dump()) - - validator.validate(storage.configuration.model_dump()) - - res = await self.storage_v2_repo.insert_storage(storage=storage, user=user) - return json(dump_storage_with_sensitive_fields(res, validator), 201) - - return "/storages_v2", ["POST"], _post - - def patch(self) -> BlueprintFactoryResponse: - """Update parts of a storage entry.""" - - @authenticate(self.authenticator) - @validate(json=apispec.CloudStoragePatch) - async def _patch( - _: Request, - user: base_models.APIUser, - storage_id: ULID, - body: apispec.CloudStoragePatch, - validator: RCloneValidator, - ) -> JSONResponse: - existing_storage = await self.storage_v2_repo.get_storage_by_id(storage_id, user=user) - if body.configuration is not None: - # we need to apply the patch to the existing storage to properly validate it - body.configuration = {**existing_storage.configuration, **body.configuration} - - for k, v in list(body.configuration.items()): - if v is None: - # delete fields that were unset - del body.configuration[k] - validator.validate(body.configuration) - - body_dict = body.model_dump(exclude_none=True) - - res = await self.storage_v2_repo.update_storage(storage_id=storage_id, user=user, **body_dict) - return json(dump_storage_with_sensitive_fields(res, validator)) - - return "/storages_v2/", ["PATCH"], _patch - - def delete(self) -> BlueprintFactoryResponse: - """Delete a storage entry.""" - - @authenticate(self.authenticator) - async def _delete(request: Request, user: base_models.APIUser, storage_id: ULID) -> HTTPResponse: - await self.storage_v2_repo.delete_storage(storage_id=storage_id, user=user) - return empty(204) - - return "/storages_v2/", ["DELETE"], _delete - - def upsert_secrets(self) -> BlueprintFactoryResponse: - """Create/update secrets for a cloud storage.""" - - @authenticate(self.authenticator) - async def _upsert_secrets(request: Request, user: base_models.APIUser, storage_id: ULID) -> JSONResponse: - # TODO: use @validate once sanic supports validating json lists - body = apispec.CloudStorageSecretPostList.model_validate(request.json) - secrets = [models.CloudStorageSecretUpsert.model_validate(s.model_dump()) for s in body.root] - result = await self.storage_v2_repo.upsert_storage_secrets( - storage_id=storage_id, 
user=user, secrets=secrets - ) - return json( - apispec.CloudStorageSecretGetList.model_validate(result).model_dump(exclude_none=True, mode="json"), 201 - ) - - return "/storages_v2//secrets", ["POST"], _upsert_secrets - - def get_secrets(self) -> BlueprintFactoryResponse: - """Return all secrets for a cloud storage.""" - - @authenticate(self.authenticator) - async def _get_secrets(request: Request, user: base_models.APIUser, storage_id: ULID) -> JSONResponse: - result = await self.storage_v2_repo.get_storage_secrets(storage_id=storage_id, user=user) - return json( - apispec.CloudStorageSecretGetList.model_validate(result).model_dump(exclude_none=True, mode="json"), 200 - ) - - return "/storages_v2//secrets", ["GET"], _get_secrets - - def delete_secrets(self) -> BlueprintFactoryResponse: - """Delete all secrets for a cloud storage.""" - - @authenticate(self.authenticator) - async def _delete_secrets(request: Request, user: base_models.APIUser, storage_id: ULID) -> HTTPResponse: - await self.storage_v2_repo.delete_storage_secrets(storage_id=storage_id, user=user) - return HTTPResponse(status=204) - - return "/storages_v2//secrets", ["DELETE"], _delete_secrets - - @dataclass(kw_only=True) class StorageSchemaBP(CustomBlueprint): """Handler for getting RClone storage schema.""" diff --git a/components/renku_data_services/storage/db.py b/components/renku_data_services/storage/db.py index c2156da0b..3dbf408ba 100644 --- a/components/renku_data_services/storage/db.py +++ b/components/renku_data_services/storage/db.py @@ -4,19 +4,13 @@ from typing import cast from cryptography.hazmat.primitives.asymmetric import rsa -from sqlalchemy import delete, select +from sqlalchemy import select from sqlalchemy.ext.asyncio import AsyncSession -from sqlalchemy.orm import selectinload from ulid import ULID import renku_data_services.base_models as base_models from renku_data_services import errors from renku_data_services.authz import models as authz_models -from renku_data_services.authz.authz import Authz, ResourceType -from renku_data_services.secrets import orm as secrets_schemas -from renku_data_services.secrets.core import encrypt_user_secret -from renku_data_services.secrets.models import SecretKind -from renku_data_services.secrets.orm import SecretORM from renku_data_services.storage import models from renku_data_services.storage import orm as schemas from renku_data_services.users.db import UserRepo @@ -54,7 +48,6 @@ async def get_storage( id: str | None = None, project_id: str | ULID | None = None, name: str | None = None, - include_secrets: bool = False, filter_by_access_level: bool = True, ) -> list[models.CloudStorage]: """Get a storage from the database.""" @@ -67,17 +60,11 @@ async def get_storage( stmt = select(schemas.CloudStorageORM) if project_id is not None: - stmt = stmt.where(schemas.CloudStorageORM.project_id == project_id) + stmt = stmt.where(schemas.CloudStorageORM.project_id == str(project_id)) if id is not None: stmt = stmt.where(schemas.CloudStorageORM.storage_id == id) if name is not None: stmt = stmt.where(schemas.CloudStorageORM.name == name) - if include_secrets: - stmt = stmt.options( - selectinload( - schemas.CloudStorageORM.secrets.and_(schemas.CloudStorageSecretsORM.user_id == user.id) - ) - ) res = await session.execute(stmt) storage_orms = res.scalars().all() @@ -93,7 +80,7 @@ async def get_storage( async def get_storage_by_id(self, storage_id: ULID, user: base_models.APIUser) -> models.CloudStorage: """Get a single storage by id.""" - storages = await 
self.get_storage(user, id=str(storage_id), include_secrets=True, filter_by_access_level=False) + storages = await self.get_storage(user, id=str(storage_id), filter_by_access_level=False) if not storages: raise errors.MissingResourceError(message=f"The storage with id '{storage_id}' cannot be found") @@ -166,89 +153,6 @@ async def delete_storage(self, storage_id: ULID, user: base_models.APIUser) -> N await session.delete(storage[0]) - async def upsert_storage_secrets( - self, storage_id: ULID, user: base_models.APIUser, secrets: list[models.CloudStorageSecretUpsert] - ) -> list[models.CloudStorageSecret]: - """Create/update cloud storage secrets.""" - # NOTE: Check that user has proper access to the storage - storage = await self.get_storage_by_id(storage_id=storage_id, user=user) - - secret_names_values = {s.name: s.value for s in secrets} - async with self.session_maker() as session, session.begin(): - stmt = ( - select(schemas.CloudStorageSecretsORM) - .where(schemas.CloudStorageSecretsORM.user_id == user.id) - .where(schemas.CloudStorageSecretsORM.storage_id == storage.storage_id) - .options(selectinload(schemas.CloudStorageSecretsORM.secret)) - ) - result = await session.execute(stmt) - existing_storage_secrets_orm = result.scalars().all() - - existing_secrets = {s.name: s for s in existing_storage_secrets_orm} - stored_secrets = [] - - for name, value in secret_names_values.items(): - encrypted_value, encrypted_key = await encrypt_user_secret( - user_repo=self.user_repo, - requested_by=user, - secret_service_public_key=self.secret_service_public_key, - secret_value=value, - ) - - if storage_secret_orm := existing_secrets.get(name): - storage_secret_orm.secret.update(encrypted_value=encrypted_value, encrypted_key=encrypted_key) - else: - secret_orm = secrets_schemas.SecretORM( - name=f"{storage_id}-{name}", - user_id=cast(str, user.id), - encrypted_value=encrypted_value, - encrypted_key=encrypted_key, - kind=SecretKind.storage, - ) - session.add(secret_orm) - - storage_secret_orm = schemas.CloudStorageSecretsORM( - user_id=cast(str, user.id), - storage_id=storage_id, - name=name, - secret_id=secret_orm.id, - ) - session.add(storage_secret_orm) - - stored_secrets.append(storage_secret_orm.dump()) - - return stored_secrets - - async def get_storage_secrets(self, storage_id: ULID, user: base_models.APIUser) -> list[models.CloudStorageSecret]: - """Get cloud storage secrets.""" - async with self.session_maker() as session, session.begin(): - stmt = ( - select(schemas.CloudStorageSecretsORM) - .where(schemas.CloudStorageSecretsORM.user_id == user.id) - .where(schemas.CloudStorageSecretsORM.storage_id == storage_id) - ) - result = await session.execute(stmt) - storage_secrets_orm = result.scalars().all() - - return [s.dump() for s in storage_secrets_orm] - - async def delete_storage_secrets(self, storage_id: ULID, user: base_models.APIUser) -> None: - """Delete cloud storage secrets.""" - async with self.session_maker() as session, session.begin(): - stmt = ( - delete(SecretORM) - .where(schemas.CloudStorageSecretsORM.secret_id == SecretORM.id) - .where(schemas.CloudStorageSecretsORM.user_id == user.id) - .where(schemas.CloudStorageSecretsORM.storage_id == storage_id) - ) - await session.execute(stmt) - stmt = ( - delete(schemas.CloudStorageSecretsORM) - .where(schemas.CloudStorageSecretsORM.user_id == user.id) - .where(schemas.CloudStorageSecretsORM.storage_id == storage_id) - ) - await session.execute(stmt) - class StorageRepository(BaseStorageRepository): """Repository for V1 cloud 
storage.""" @@ -274,31 +178,3 @@ async def filter_projects_by_access_level( ) return await self.gitlab_client.filter_projects_by_access_level(user, project_ids, gitlab_access_level) - - -class StorageV2Repository(BaseStorageRepository): - """Repository for V2 cloud storage.""" - - def __init__( - self, - project_authz: Authz, - session_maker: Callable[..., AsyncSession], - user_repo: UserRepo, - secret_service_public_key: rsa.RSAPublicKey, - ) -> None: - super().__init__(session_maker, user_repo, secret_service_public_key) - self.project_authz: Authz = project_authz - - async def filter_projects_by_access_level( - self, user: base_models.APIUser, project_ids: list[str], minimum_access_level: authz_models.Role - ) -> list[str]: - """Get a list of project IDs of which the user is a member with a specific access level.""" - if not user.is_authenticated or not project_ids: - return [] - - scope = authz_models.Scope.WRITE if minimum_access_level == authz_models.Role.OWNER else authz_models.Scope.READ - output = [] - for id in project_ids: - if await self.project_authz.has_permission(user, ResourceType.project, ULID.from_str(id), scope): - output.append(id) - return output diff --git a/components/renku_data_services/storage/models.py b/components/renku_data_services/storage/models.py index e019f77c5..4817e9931 100644 --- a/components/renku_data_services/storage/models.py +++ b/components/renku_data_services/storage/models.py @@ -70,8 +70,6 @@ class UnsavedCloudStorage(BaseModel): target_path: str = Field(min_length=1) """Path inside the target repository to mount/clone data to.""" - secrets: list["CloudStorageSecret"] = Field(default_factory=list) - @classmethod def from_dict(cls, data: dict) -> "UnsavedCloudStorage": """Create the model from a plain dictionary.""" @@ -230,26 +228,3 @@ class CloudStorage(UnsavedCloudStorage): """Cloudstorage saved in the database.""" storage_id: ULID = Field(default=None) - - -class CloudStorageSecret(BaseModel): - """Cloud storage secret model.""" - - user_id: str = Field() - storage_id: ULID = Field() - name: str = Field(min_length=1, max_length=99) - secret_id: ULID = Field() - - @classmethod - def from_dict(cls, data: dict) -> "CloudStorageSecret": - """Create the model from a plain dictionary.""" - return cls( - user_id=data["user_id"], storage_id=data["storage_id"], name=data["name"], secret_id=data["secret_id"] - ) - - -class CloudStorageSecretUpsert(BaseModel): - """Insert/update storage secret data.""" - - name: str = Field() - value: str = Field() diff --git a/components/renku_data_services/storage/orm.py b/components/renku_data_services/storage/orm.py index e127cc439..cf5fe9106 100644 --- a/components/renku_data_services/storage/orm.py +++ b/components/renku_data_services/storage/orm.py @@ -2,15 +2,13 @@ from typing import Any -from sqlalchemy import JSON, Boolean, ForeignKey, MetaData, String +from sqlalchemy import JSON, Boolean, MetaData, String from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column, relationship -from sqlalchemy.schema import Index, UniqueConstraint +from sqlalchemy.orm import DeclarativeBase, Mapped, MappedAsDataclass, mapped_column +from sqlalchemy.schema import UniqueConstraint from ulid import ULID -from renku_data_services.secrets.orm import SecretORM from renku_data_services.storage import models -from renku_data_services.users.orm import UserORM from renku_data_services.utils.sqlalchemy import ULIDType JSONVariant = JSON().with_variant(JSONB(), 
"postgresql") @@ -55,11 +53,6 @@ class CloudStorageORM(BaseORM): ) """Id of this storage.""" - secrets: Mapped[list["CloudStorageSecretsORM"]] = relationship( - lazy="noload", init=False, viewonly=True, default_factory=list - ) - """Saved secrets for the storage.""" - __table_args__ = ( UniqueConstraint( "project_id", @@ -92,41 +85,4 @@ def dump(self) -> models.CloudStorage: target_path=self.target_path, storage_id=self.storage_id, readonly=self.readonly, - secrets=[s.dump() for s in self.secrets], - ) - - -class CloudStorageSecretsORM(BaseORM): - """Secrets for cloud storages.""" - - __tablename__ = "cloud_storage_secrets" - __table_args__ = (Index("ix_storage_cloud_storage_secrets_user_id_storage_id", "user_id", "storage_id"),) - - user_id: Mapped[str] = mapped_column( - "user_id", ForeignKey(UserORM.keycloak_id, ondelete="CASCADE"), primary_key=True - ) - - storage_id: Mapped[ULID] = mapped_column( - "storage_id", ForeignKey(CloudStorageORM.storage_id, ondelete="CASCADE"), primary_key=True - ) - - name: Mapped[str] = mapped_column("name", String(), primary_key=True) - - secret_id: Mapped[ULID] = mapped_column("secret_id", ForeignKey(SecretORM.id, ondelete="CASCADE")) - secret: Mapped[SecretORM] = relationship(init=False, repr=False, lazy="selectin") - - @classmethod - def load(cls, storage_secret: models.CloudStorageSecret) -> "CloudStorageSecretsORM": - """Create an instance from the cloud storage secret model.""" - return cls( - user_id=storage_secret.user_id, - storage_id=storage_secret.storage_id, - name=storage_secret.name, - secret_id=storage_secret.secret_id, - ) - - def dump(self) -> models.CloudStorageSecret: - """Create a cloud storage secret model from the ORM object.""" - return models.CloudStorageSecret( - user_id=self.user_id, storage_id=self.storage_id, name=self.name, secret_id=self.secret_id ) diff --git a/components/renku_data_services/utils/middleware.py b/components/renku_data_services/utils/middleware.py index 8a35ca312..28e6ca279 100644 --- a/components/renku_data_services/utils/middleware.py +++ b/components/renku_data_services/utils/middleware.py @@ -7,5 +7,5 @@ async def validate_null_byte(request: Request) -> None: """Validate that a request does not contain a null byte.""" - if "\\u0000".encode() in request.body: # noqa: UP012 + if b"\\u0000" in request.body: raise errors.ValidationError(message="Null byte found in request") diff --git a/components/renku_data_services/utils/sqlalchemy.py b/components/renku_data_services/utils/sqlalchemy.py index f1cd59c9b..de520c5d7 100644 --- a/components/renku_data_services/utils/sqlalchemy.py +++ b/components/renku_data_services/utils/sqlalchemy.py @@ -1,5 +1,6 @@ """Utilities for SQLAlchemy.""" +from pathlib import PurePosixPath from typing import cast from sqlalchemy import Dialect, types @@ -23,3 +24,25 @@ def process_result_value(self, value: str | None, dialect: Dialect) -> ULID | No if value is None: return None return cast(ULID, ULID.from_str(value)) # cast because mypy doesn't understand ULID type annotations + + +class PurePosixPathType(types.TypeDecorator): + """Wrapper type for Path <--> str conversion.""" + + impl = types.String + cache_ok = True + + def process_bind_param(self, value: PurePosixPath | str | None, dialect: Dialect) -> str | None: + """Transform value for storing in the database.""" + if value is None: + return None + elif isinstance(value, str): + return value + else: + return value.as_posix() + + def process_result_value(self, value: str | None, dialect: Dialect) -> PurePosixPath | None: + 
"""Transform string from database into PosixPath.""" + if value is None: + return None + return PurePosixPath(value) diff --git a/kind_config.yaml b/kind_config.yaml deleted file mode 100644 index a83c3b093..000000000 --- a/kind_config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -networking: - ipFamily: ipv4 -nodes: - - role: control-plane - kubeadmConfigPatches: - - | - kind: InitConfiguration - nodeRegistration: - kubeletExtraArgs: - node-labels: "ingress-ready=true" - extraPortMappings: - - containerPort: 80 - hostPort: 80 - protocol: TCP diff --git a/poetry.lock b/poetry.lock index 0a75f5f82..4bd733d51 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofiles" @@ -215,6 +215,17 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "appier" +version = "1.34.4" +description = "Appier Framework" +optional = false +python-versions = "*" +files = [ + {file = "appier-1.34.4-py2.py3-none-any.whl", hash = "sha256:96769bf56e015175f798958d6eeb189b6fbc2ca799f3b7d2c9bd463eb45a12ae"}, + {file = "appier-1.34.4.tar.gz", hash = "sha256:dd3b244ee2797c7ceda0b81f2331e39724262d4e838f1e53866543136d88ee96"}, +] + [[package]] name = "argcomplete" version = "3.5.0" @@ -229,6 +240,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -318,20 +343,21 @@ cryptography = "*" [[package]] name = "authzed" -version = "0.18.3" +version = "0.16.0" description = "Client library for SpiceDB." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "authzed-0.18.3-py3-none-any.whl", hash = "sha256:367baa6f00dac68daad814f7db82464576530a11806ac4b17978d8c0e89511b7"}, - {file = "authzed-0.18.3.tar.gz", hash = "sha256:a06a930c78fdbe61f5caf4415739a9941d23068527fcf91011edc1ae0188a4f5"}, + {file = "authzed-0.16.0-py3-none-any.whl", hash = "sha256:84d3f3b1e4f8db19ddf86e9a5dea347f2fc5ce6f1dcd4c897758db57be5bde93"}, + {file = "authzed-0.16.0.tar.gz", hash = "sha256:29d6338c9c566227f704718639558959afc027be8d5b46bd01ef1dab5a26d0db"}, ] [package.dependencies] -googleapis-common-protos = ">=1.65.0,<2.0.0" -grpc-interceptor = ">=0.15.4,<0.16.0" +google_api = ">=0.1.12,<0.2.0" +google-api-core = ">=2.4.0,<3.0.0" grpcio = ">=1.63,<2.0" protobuf = ">=5.26,<6" +typing-extensions = ">=3.7.4,<5" [[package]] name = "avro-preprocessor" @@ -763,38 +789,43 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "43.0.1" +version = "42.0.8" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, + {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, + {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, + {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = 
"sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, + {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, + {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, + {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, ] [package.dependencies] @@ -807,7 +838,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1214,6 +1245,43 @@ files = [ {file = "genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37"}, ] +[[package]] +name = "google-api" +version = "0.1.12" +description = "Google API Client" +optional = false +python-versions = "*" +files = [ + {file = "google_api-0.1.12-py2.py3-none-any.whl", hash = "sha256:618f9f2076482a128c408867b5398b291938fe8e653ed7f8ed58fce5042f0c75"}, + {file = "google_api-0.1.12.tar.gz", hash = "sha256:5611c87cdfc6b72927a5e2ea9299ddd6f3a206e29a342b86d3ff3ecc351c30a3"}, +] + +[package.dependencies] +appier = "*" + +[[package]] +name = "google-api-core" +version = "2.20.0" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_api_core-2.20.0-py3-none-any.whl", hash = "sha256:ef0591ef03c30bb83f79b3d0575c3f31219001fc9c5cf37024d08310aeffed8a"}, + {file = "google_api_core-2.20.0.tar.gz", hash = "sha256:f74dff1889ba291a4b76c5079df0711810e2d9da81abfdc99957bc961c1eb28f"}, +] + +[package.dependencies] +google-auth = 
">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + [[package]] name = "google-auth" version = "2.34.0" @@ -1336,23 +1404,6 @@ files = [ docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] -[[package]] -name = "grpc-interceptor" -version = "0.15.4" -description = "Simplifies gRPC interceptors" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "grpc-interceptor-0.15.4.tar.gz", hash = "sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926"}, - {file = "grpc_interceptor-0.15.4-py3-none-any.whl", hash = "sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d"}, -] - -[package.dependencies] -grpcio = ">=1.49.1,<2.0.0" - -[package.extras] -testing = ["protobuf (>=4.21.9)"] - [[package]] name = "grpcio" version = "1.65.5" @@ -1527,6 +1578,40 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "hypothesis" version = "6.111.1" @@ -1736,6 +1821,31 @@ files = [ [package.dependencies] six = "*" +[[package]] +name = "kr8s" +version = "0.17.0" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.0-py3-none-any.whl", hash = "sha256:7307bca7b125cdc8c41ec9d7a0b3b1273c4c76b10b992a054aaf1e38309f1445"}, + {file = "kr8s-0.17.0.tar.gz", hash = "sha256:c2afe40461f1b1c853dcde755a64fe4837e05b931c6effbfff12ab32ae224445"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest 
(>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -2260,6 +2370,23 @@ files = [ prometheus-client = ">=0.7.1,<0.8.0" sanic = ">=22.0.0" +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + [[package]] name = "protobuf" version = "5.27.3" @@ -2694,6 +2821,23 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] +[[package]] +name = "pytest-mock" +version = "3.14.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"}, + {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + [[package]] name = "pytest-postgresql" version = "6.1.1" @@ -2747,6 +2891,41 @@ psutil = ["psutil (>=3.0)"] setproctitle = ["setproctitle"] testing = ["filelock"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = 
"python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -2781,6 +2960,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.1.1" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.1.1-py3-none-any.whl", hash = "sha256:43f2622b7aaaf4f45dd873e80cfd181058503e08ffdeac5218135f3a97bd0aec"}, + {file = "python_jsonpath-1.1.1.tar.gz", hash = "sha256:d2944e1f7a1d6c8fa958724f9570b8f04a4e00ab6bf1e4733346ab8dcef1f74f"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -3122,24 +3312,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -3147,7 +3337,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -3155,7 +3345,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -3163,7 +3353,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -3571,6 +3761,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tomli" version = "2.0.1" @@ -3698,6 +3899,17 @@ files = [ {file = 
"types_setuptools-72.2.0.20240821-py3-none-any.whl", hash = "sha256:260e89d6d3b42cc35f9f0f382d030713b7b547344a664c05c9175e6ba124fac7"}, ] +[[package]] +name = "types-toml" +version = "0.10.8.20240310" +description = "Typing stubs for toml" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-toml-0.10.8.20240310.tar.gz", hash = "sha256:3d41501302972436a6b8b239c850b26689657e25281b48ff0ec06345b8830331"}, + {file = "types_toml-0.10.8.20240310-py3-none-any.whl", hash = "sha256:627b47775d25fa29977d9c70dc0cbab3f314f32c8d8d0c012f2ef5de7aaec05d"}, +] + [[package]] name = "types-urllib3" version = "1.26.25.14" @@ -4038,6 +4250,20 @@ MarkupSafe = ">=2.1.1" [package.extras] watchdog = ["watchdog (>=2.3)"] +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [[package]] name = "yarl" version = "1.9.4" @@ -4144,4 +4370,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "a44608066433ede3cf9875cebac61b84b8f50a56a0455a7ed774f651d1bfe065" +content-hash = "1e89e031c2b9136c116e2307e46fc721c9193726310e1806b7ee427bd4444c99" diff --git a/projects/background_jobs/poetry.lock b/projects/background_jobs/poetry.lock index c485b846c..b63577bbd 100644 --- a/projects/background_jobs/poetry.lock +++ b/projects/background_jobs/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "aiofile" @@ -78,6 +78,34 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -548,6 +576,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." 
+optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -950,6 +989,40 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -997,6 +1070,31 @@ files = [ {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, ] +[[package]] +name = "kr8s" +version = "0.17.2" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.2-py3-none-any.whl", hash = "sha256:5beba0ccf08c7a2305c0fa8f85fa8d2fe7d3f265872f718489e1bea3162fa91b"}, + {file = "kr8s-0.17.2.tar.gz", hash = "sha256:536d08c3f701365e6ac5ce42c0e8313aa6e6740f92b7077f28209e892af046ab"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1112,6 +1210,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1245,6 +1362,17 @@ rsa = ["cryptography (>=3.0.0)"] signals = ["blinker (>=1.4.0)"] signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + [[package]] name = "prometheus-client" version = "0.7.1" @@ -1607,6 +1735,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = 
"python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1641,6 +1804,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.2.0" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.2.0-py3-none-any.whl", hash = "sha256:3172c7b87098fced1ed84bd3492bd1a19ef1ad41d4f5b8a3e9a147c750ac08b3"}, + {file = "python_jsonpath-1.2.0.tar.gz", hash = "sha256:a29a84ec3ac38e5dcaa62ac2a215de72c4eb60cb1303e10700da980cf7873775"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -1678,7 +1852,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1686,16 +1859,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = 
"PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1712,7 +1877,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1720,7 +1884,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = 
"sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -1835,24 +1998,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = 
"ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -1860,7 +2023,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = "sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -1868,7 +2031,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -1876,7 +2039,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = 
"ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -2154,6 +2317,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2446,7 +2620,38 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.4" +description = "The comprehensive WSGI web application library." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c"}, + {file = "werkzeug-3.0.4.tar.gz", hash = "sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "50073b189070d7ac93e12974f21674a0fe8a1254fd89db3cb880cf47c089798d" +content-hash = "bd6b9d8453cb1bdf5ba00006220a1c1d2a06246e72a47f76bfe7889485393e58" diff --git a/projects/background_jobs/pyproject.toml b/projects/background_jobs/pyproject.toml index cb13e197f..228be00b6 100644 --- a/projects/background_jobs/pyproject.toml +++ b/projects/background_jobs/pyproject.toml @@ -32,7 +32,9 @@ packages = [ { include = "renku_data_services/repositories", from = "../../components" }, { include = "renku_data_services/session", from = "../../components" }, { include = "renku_data_services/platform", from = "../../components" }, + { include = "renku_data_services/data_connectors", from = "../../components" }, { include = "renku_data_services/migrations", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, ] [tool.poetry.dependencies] @@ -62,6 +64,11 @@ sentry-sdk = { version = "^2.14.0", extras = ["sanic"] } # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +escapism = "^1.0.1" +kr8s = "^0.17.2" +marshmallow = "^3.22.0" +toml = "^0.10.2" +werkzeug = "^3.0.4" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/projects/renku_data_service/Dockerfile b/projects/renku_data_service/Dockerfile index 08245b04e..05c1cc672 100644 --- a/projects/renku_data_service/Dockerfile +++ b/projects/renku_data_service/Dockerfile @@ -28,7 +28,7 @@ RUN env/bin/pip --no-cache-dir install projects/renku_data_service/dist/*.whl FROM python:3.12-slim-bookworm ARG USER_UID=1000 ARG USER_GID=$USER_UID -ENV prometheus_multiproc_dir=/prometheus +ENV prometheus_multiproc_dir=/prometheus ENV PROMETHEUS_MULTIPROC_DIR=/prometheus RUN mkdir /prometheus && chown $USER_UID:$USER_GID /prometheus RUN apt-get update && apt-get install -y \ diff --git a/projects/renku_data_service/poetry.lock b/projects/renku_data_service/poetry.lock index 11deb5193..809edf0bd 100644 --- a/projects/renku_data_service/poetry.lock +++ b/projects/renku_data_service/poetry.lock @@ -1,4 +1,21 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. + +[[package]] +name = "aiofile" +version = "3.8.8" +description = "Asynchronous file operations." 
+optional = false +python-versions = ">=3.7, <4" +files = [ + {file = "aiofile-3.8.8-py3-none-any.whl", hash = "sha256:41e8845cce055779cd77713d949a339deb012eab605b857765e8f8e52a5ed811"}, + {file = "aiofile-3.8.8.tar.gz", hash = "sha256:41f3dc40bd730459d58610476e82e5efb2f84ae6e9fa088a9545385d838b8a43"}, +] + +[package.dependencies] +caio = ">=0.9.0,<0.10.0" + +[package.extras] +develop = ["aiomisc-pytest", "coveralls", "pytest", "pytest-cov", "pytest-rst"] [[package]] name = "aiofile" @@ -28,6 +45,115 @@ files = [ {file = "aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c"}, ] +[[package]] +name = "aiohttp" +version = "3.9.5" +description = "Async http client/server framework (asyncio)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fcde4c397f673fdec23e6b05ebf8d4751314fa7c24f93334bf1f1364c1c69ac7"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d6b3f1fabe465e819aed2c421a6743d8debbde79b6a8600739300630a01bf2c"}, + {file = "aiohttp-3.9.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ae79c1bc12c34082d92bf9422764f799aee4746fd7a392db46b7fd357d4a17a"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d3ebb9e1316ec74277d19c5f482f98cc65a73ccd5430540d6d11682cd857430"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84dabd95154f43a2ea80deffec9cb44d2e301e38a0c9d331cc4aa0166fe28ae3"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a02fbeca6f63cb1f0475c799679057fc9268b77075ab7cf3f1c600e81dd46b"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c26959ca7b75ff768e2776d8055bf9582a6267e24556bb7f7bd29e677932be72"}, + {file = "aiohttp-3.9.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:714d4e5231fed4ba2762ed489b4aec07b2b9953cf4ee31e9871caac895a839c0"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7a6a8354f1b62e15d48e04350f13e726fa08b62c3d7b8401c0a1314f02e3558"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c413016880e03e69d166efb5a1a95d40f83d5a3a648d16486592c49ffb76d0db"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ff84aeb864e0fac81f676be9f4685f0527b660f1efdc40dcede3c251ef1e867f"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ad7f2919d7dac062f24d6f5fe95d401597fbb015a25771f85e692d043c9d7832"}, + {file = "aiohttp-3.9.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:702e2c7c187c1a498a4e2b03155d52658fdd6fda882d3d7fbb891a5cf108bb10"}, + {file = "aiohttp-3.9.5-cp310-cp310-win32.whl", hash = "sha256:67c3119f5ddc7261d47163ed86d760ddf0e625cd6246b4ed852e82159617b5fb"}, + {file = "aiohttp-3.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:471f0ef53ccedec9995287f02caf0c068732f026455f07db3f01a46e49d76bbb"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ae53e33ee7476dd3d1132f932eeb39bf6125083820049d06edcdca4381f342"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c088c4d70d21f8ca5c0b8b5403fe84a7bc8e024161febdd4ef04575ef35d474d"}, + {file = "aiohttp-3.9.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:639d0042b7670222f33b0028de6b4e2fad6451462ce7df2af8aee37dcac55424"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f26383adb94da5e7fb388d441bf09c61e5e35f455a3217bfd790c6b6bc64b2ee"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66331d00fb28dc90aa606d9a54304af76b335ae204d1836f65797d6fe27f1ca2"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ff550491f5492ab5ed3533e76b8567f4b37bd2995e780a1f46bca2024223233"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f22eb3a6c1080d862befa0a89c380b4dafce29dc6cd56083f630073d102eb595"}, + {file = "aiohttp-3.9.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a81b1143d42b66ffc40a441379387076243ef7b51019204fd3ec36b9f69e77d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f64fd07515dad67f24b6ea4a66ae2876c01031de91c93075b8093f07c0a2d93d"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:93e22add827447d2e26d67c9ac0161756007f152fdc5210277d00a85f6c92323"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:55b39c8684a46e56ef8c8d24faf02de4a2b2ac60d26cee93bc595651ff545de9"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4715a9b778f4293b9f8ae7a0a7cef9829f02ff8d6277a39d7f40565c737d3771"}, + {file = "aiohttp-3.9.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:afc52b8d969eff14e069a710057d15ab9ac17cd4b6753042c407dcea0e40bf75"}, + {file = "aiohttp-3.9.5-cp311-cp311-win32.whl", hash = "sha256:b3df71da99c98534be076196791adca8819761f0bf6e08e07fd7da25127150d6"}, + {file = "aiohttp-3.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:88e311d98cc0bf45b62fc46c66753a83445f5ab20038bcc1b8a1cc05666f428a"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c7a4b7a6cf5b6eb11e109a9755fd4fda7d57395f8c575e166d363b9fc3ec4678"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:0a158704edf0abcac8ac371fbb54044f3270bdbc93e254a82b6c82be1ef08f3c"}, + {file = "aiohttp-3.9.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d153f652a687a8e95ad367a86a61e8d53d528b0530ef382ec5aaf533140ed00f"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82a6a97d9771cb48ae16979c3a3a9a18b600a8505b1115cfe354dfb2054468b4"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:60cdbd56f4cad9f69c35eaac0fbbdf1f77b0ff9456cebd4902f3dd1cf096464c"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8676e8fd73141ded15ea586de0b7cda1542960a7b9ad89b2b06428e97125d4fa"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da00da442a0e31f1c69d26d224e1efd3a1ca5bcbf210978a2ca7426dfcae9f58"}, + {file = "aiohttp-3.9.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18f634d540dd099c262e9f887c8bbacc959847cfe5da7a0e2e1cf3f14dbf2daf"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:320e8618eda64e19d11bdb3bd04ccc0a816c17eaecb7e4945d01deee2a22f95f"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:2faa61a904b83142747fc6a6d7ad8fccff898c849123030f8e75d5d967fd4a81"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8c64a6dc3fe5db7b1b4d2b5cb84c4f677768bdc340611eca673afb7cf416ef5a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:393c7aba2b55559ef7ab791c94b44f7482a07bf7640d17b341b79081f5e5cd1a"}, + {file = "aiohttp-3.9.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c671dc117c2c21a1ca10c116cfcd6e3e44da7fcde37bf83b2be485ab377b25da"}, + {file = "aiohttp-3.9.5-cp312-cp312-win32.whl", hash = "sha256:5a7ee16aab26e76add4afc45e8f8206c95d1d75540f1039b84a03c3b3800dd59"}, + {file = "aiohttp-3.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:5ca51eadbd67045396bc92a4345d1790b7301c14d1848feaac1d6a6c9289e888"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:694d828b5c41255e54bc2dddb51a9f5150b4eefa9886e38b52605a05d96566e8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0605cc2c0088fcaae79f01c913a38611ad09ba68ff482402d3410bf59039bfb8"}, + {file = "aiohttp-3.9.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4558e5012ee03d2638c681e156461d37b7a113fe13970d438d95d10173d25f78"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dbc053ac75ccc63dc3a3cc547b98c7258ec35a215a92bd9f983e0aac95d3d5b"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4109adee842b90671f1b689901b948f347325045c15f46b39797ae1bf17019de"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6ea1a5b409a85477fd8e5ee6ad8f0e40bf2844c270955e09360418cfd09abac"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3c2890ca8c59ee683fd09adf32321a40fe1cf164e3387799efb2acebf090c11"}, + {file = "aiohttp-3.9.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3916c8692dbd9d55c523374a3b8213e628424d19116ac4308e434dbf6d95bbdd"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8d1964eb7617907c792ca00b341b5ec3e01ae8c280825deadbbd678447b127e1"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5ab8e1f6bee051a4bf6195e38a5c13e5e161cb7bad83d8854524798bd9fcd6e"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:52c27110f3862a1afbcb2af4281fc9fdc40327fa286c4625dfee247c3ba90156"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:7f64cbd44443e80094309875d4f9c71d0401e966d191c3d469cde4642bc2e031"}, + {file = "aiohttp-3.9.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b4f72fbb66279624bfe83fd5eb6aea0022dad8eec62b71e7bf63ee1caadeafe"}, + {file = "aiohttp-3.9.5-cp38-cp38-win32.whl", hash = "sha256:6380c039ec52866c06d69b5c7aad5478b24ed11696f0e72f6b807cfb261453da"}, + {file = "aiohttp-3.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:da22dab31d7180f8c3ac7c7635f3bcd53808f374f6aa333fe0b0b9e14b01f91a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1732102949ff6087589408d76cd6dea656b93c896b011ecafff418c9661dc4ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6021d296318cb6f9414b48e6a439a7f5d1f665464da507e8ff640848ee2a58a"}, + {file = "aiohttp-3.9.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:239f975589a944eeb1bad26b8b140a59a3a320067fb3cd10b75c3092405a1372"}, + {file = 
"aiohttp-3.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b7b30258348082826d274504fbc7c849959f1989d86c29bc355107accec6cfb"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2adf5c87ff6d8b277814a28a535b59e20bfea40a101db6b3bdca7e9926bc24"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a3d838441bebcf5cf442700e3963f58b5c33f015341f9ea86dcd7d503c07e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e3a1ae66e3d0c17cf65c08968a5ee3180c5a95920ec2731f53343fac9bad106"}, + {file = "aiohttp-3.9.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9c69e77370cce2d6df5d12b4e12bdcca60c47ba13d1cbbc8645dd005a20b738b"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf56238f4bbf49dab8c2dc2e6b1b68502b1e88d335bea59b3f5b9f4c001475"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d1469f228cd9ffddd396d9948b8c9cd8022b6d1bf1e40c6f25b0fb90b4f893ed"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:45731330e754f5811c314901cebdf19dd776a44b31927fa4b4dbecab9e457b0c"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:3fcb4046d2904378e3aeea1df51f697b0467f2aac55d232c87ba162709478c46"}, + {file = "aiohttp-3.9.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8cf142aa6c1a751fcb364158fd710b8a9be874b81889c2bd13aa8893197455e2"}, + {file = "aiohttp-3.9.5-cp39-cp39-win32.whl", hash = "sha256:7b179eea70833c8dee51ec42f3b4097bd6370892fa93f510f76762105568cf09"}, + {file = "aiohttp-3.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:38d80498e2e169bc61418ff36170e0aad0cd268da8b38a17c4cf29d254a8b3f1"}, + {file = "aiohttp-3.9.5.tar.gz", hash = "sha256:edea7d15772ceeb29db4aff55e482d4bcfb6ae160ce144f2682de02f6d693551"}, +] + +[package.dependencies] +aiosignal = ">=1.1.2" +attrs = ">=17.3.0" +frozenlist = ">=1.1.1" +multidict = ">=4.5,<7.0" +yarl = ">=1.0,<2.0" + +[package.extras] +speedups = ["Brotli", "aiodns", "brotlicffi"] + +[[package]] +name = "aiosignal" +version = "1.3.1" +description = "aiosignal: a list of registered asynchronous callbacks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, + {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, +] + +[package.dependencies] +frozenlist = ">=1.1.0" + [[package]] name = "alembic" version = "1.13.2" @@ -78,6 +204,17 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "appier" +version = "1.34.4" +description = "Appier Framework" +optional = false +python-versions = "*" +files = [ + {file = "appier-1.34.4-py2.py3-none-any.whl", hash = "sha256:96769bf56e015175f798958d6eeb189b6fbc2ca799f3b7d2c9bd463eb45a12ae"}, + {file = "appier-1.34.4.tar.gz", hash = "sha256:dd3b244ee2797c7ceda0b81f2331e39724262d4e838f1e53866543136d88ee96"}, +] + [[package]] name = "argcomplete" version = "3.4.0" @@ -92,6 +229,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", 
"wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -146,6 +297,25 @@ files = [ docs = ["Sphinx (>=5.3.0,<5.4.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] test = ["flake8 (>=6.1,<7.0)", "uvloop (>=0.15.3)"] +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + [[package]] name = "authlib" version = "1.3.2" @@ -162,20 +332,21 @@ cryptography = "*" [[package]] name = "authzed" -version = "0.18.3" +version = "0.15.0" description = "Client library for SpiceDB." optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "authzed-0.18.3-py3-none-any.whl", hash = "sha256:367baa6f00dac68daad814f7db82464576530a11806ac4b17978d8c0e89511b7"}, - {file = "authzed-0.18.3.tar.gz", hash = "sha256:a06a930c78fdbe61f5caf4415739a9941d23068527fcf91011edc1ae0188a4f5"}, + {file = "authzed-0.15.0-py3-none-any.whl", hash = "sha256:b14069a5fce970b0b4fc05dc86d41cfd3d37bb47adc9dc83ac04adb31380e566"}, + {file = "authzed-0.15.0.tar.gz", hash = "sha256:213dbdd5ae27d98189c138e70be309dbd36d03a84b4e6a048bfeb2595db42764"}, ] [package.dependencies] -googleapis-common-protos = ">=1.65.0,<2.0.0" -grpc-interceptor = ">=0.15.4,<0.16.0" +google_api = ">=0.1.12,<0.2.0" +google-api-core = ">=2.4.0,<3.0.0" grpcio = ">=1.63,<2.0" protobuf = ">=5.26,<6" +typing-extensions = ">=3.7.4,<5" [[package]] name = "avro-preprocessor" @@ -288,13 +459,13 @@ files = [ [[package]] name = "certifi" -version = "2024.6.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.6.2-py3-none-any.whl", hash = "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56"}, - {file = "certifi-2024.6.2.tar.gz", hash = "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, ] [[package]] @@ -487,38 +658,43 @@ files = [ [[package]] name = "cryptography" -version = "43.0.1" +version = "42.0.8" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = 
"sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"}, + {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"}, + {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"}, + {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"}, + {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"}, + {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"}, + {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"}, + {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"}, + {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"}, + {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"}, + {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"}, + {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"}, + {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"}, + {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"}, ] [package.dependencies] @@ -531,7 +707,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -658,6 +834,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." 
+optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -678,13 +865,13 @@ doc = ["Sphinx", "sphinx-rtd-theme", "sphinxcontrib-spelling"] [[package]] name = "faker" -version = "25.9.1" +version = "26.0.0" description = "Faker is a Python package that generates fake data for you." optional = false python-versions = ">=3.8" files = [ - {file = "Faker-25.9.1-py3-none-any.whl", hash = "sha256:f1dc27dc8035cb7e97e96afbb5fe1305eed6aeea53374702cbac96acfe851626"}, - {file = "Faker-25.9.1.tar.gz", hash = "sha256:0e1cf7a8d3c94de91a65ab1e9cf7050903efae1e97901f8e5924a9f45147ae44"}, + {file = "Faker-26.0.0-py3-none-any.whl", hash = "sha256:886ee28219be96949cd21ecc96c4c742ee1680e77f687b095202c8def1a08f06"}, + {file = "Faker-26.0.0.tar.gz", hash = "sha256:0f60978314973de02c00474c2ae899785a42b2cf4f41b7987e93c132a2b8a4a9"}, ] [package.dependencies] @@ -714,42 +901,42 @@ probabilistic = ["pyprobables (>=0.6,<0.7)"] [[package]] name = "fastavro" -version = "1.9.4" +version = "1.9.5" description = "Fast read/write of AVRO files" optional = false python-versions = ">=3.8" files = [ - {file = "fastavro-1.9.4-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:60cb38f07462a7fb4e4440ed0de67d3d400ae6b3d780f81327bebde9aa55faef"}, - {file = "fastavro-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:063d01d197fc929c20adc09ca9f0ca86d33ac25ee0963ce0b438244eee8315ae"}, - {file = "fastavro-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87a9053fcfbc895f2a16a4303af22077e3a8fdcf1cd5d6ed47ff2ef22cbba2f0"}, - {file = "fastavro-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:02bf1276b7326397314adf41b34a4890f6ffa59cf7e0eb20b9e4ab0a143a1598"}, - {file = "fastavro-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56bed9eca435389a8861e6e2d631ec7f8f5dda5b23f93517ac710665bd34ca29"}, - {file = "fastavro-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:0cd2099c8c672b853e0b20c13e9b62a69d3fbf67ee7c59c7271ba5df1680310d"}, - {file = "fastavro-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:af8c6d8c43a02b5569c093fc5467469541ac408c79c36a5b0900d3dd0b3ba838"}, - {file = "fastavro-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4a138710bd61580324d23bc5e3df01f0b82aee0a76404d5dddae73d9e4c723f"}, - {file = "fastavro-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:903d97418120ca6b6a7f38a731166c1ccc2c4344ee5e0470d09eb1dc3687540a"}, - {file = "fastavro-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c443eeb99899d062dbf78c525e4614dd77e041a7688fa2710c224f4033f193ae"}, - {file = "fastavro-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ac26ab0774d1b2b7af6d8f4300ad20bbc4b5469e658a02931ad13ce23635152f"}, - {file = "fastavro-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:cf7247874c22be856ba7d1f46a0f6e0379a6025f1a48a7da640444cbac6f570b"}, - {file = "fastavro-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:68912f2020e1b3d70557260b27dd85fb49a4fc6bfab18d384926127452c1da4c"}, - {file = "fastavro-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6925ce137cdd78e109abdb0bc33aad55de6c9f2d2d3036b65453128f2f5f5b92"}, - 
{file = "fastavro-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b928cd294e36e35516d0deb9e104b45be922ba06940794260a4e5dbed6c192a"}, - {file = "fastavro-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:90c9838bc4c991ffff5dd9d88a0cc0030f938b3fdf038cdf6babde144b920246"}, - {file = "fastavro-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:eca6e54da571b06a3c5a72dbb7212073f56c92a6fbfbf847b91c347510f8a426"}, - {file = "fastavro-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4b02839ac261100cefca2e2ad04cdfedc556cb66b5ec735e0db428e74b399de"}, - {file = "fastavro-1.9.4-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:4451ee9a305a73313a1558d471299f3130e4ecc10a88bf5742aa03fb37e042e6"}, - {file = "fastavro-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8524fccfb379565568c045d29b2ebf71e1f2c0dd484aeda9fe784ef5febe1a8"}, - {file = "fastavro-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33d0a00a6e09baa20f6f038d7a2ddcb7eef0e7a9980e947a018300cb047091b8"}, - {file = "fastavro-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:23d7e5b29c9bf6f26e8be754b2c8b919838e506f78ef724de7d22881696712fc"}, - {file = "fastavro-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e6ab3ee53944326460edf1125b2ad5be2fadd80f7211b13c45fa0c503b4cf8d"}, - {file = "fastavro-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:64d335ec2004204c501f8697c385d0a8f6b521ac82d5b30696f789ff5bc85f3c"}, - {file = "fastavro-1.9.4-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:7e05f44c493e89e73833bd3ff3790538726906d2856f59adc8103539f4a1b232"}, - {file = "fastavro-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:253c63993250bff4ee7b11fb46cf3a4622180a783bedc82a24c6fdcd1b10ca2a"}, - {file = "fastavro-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24d6942eb1db14640c2581e0ecd1bbe0afc8a83731fcd3064ae7f429d7880cb7"}, - {file = "fastavro-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d47bb66be6091cd48cfe026adcad11c8b11d7d815a2949a1e4ccf03df981ca65"}, - {file = "fastavro-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c293897f12f910e58a1024f9c77f565aa8e23b36aafda6ad8e7041accc57a57f"}, - {file = "fastavro-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:f05d2afcb10a92e2a9e580a3891f090589b3e567fdc5641f8a46a0b084f120c3"}, - {file = "fastavro-1.9.4.tar.gz", hash = "sha256:56b8363e360a1256c94562393dc7f8611f3baf2b3159f64fb2b9c6b87b14e876"}, + {file = "fastavro-1.9.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:61253148e95dd2b6457247b441b7555074a55de17aef85f5165bfd5facf600fc"}, + {file = "fastavro-1.9.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b604935d671ad47d888efc92a106f98e9440874108b444ac10e28d643109c937"}, + {file = "fastavro-1.9.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0adbf4956fd53bd74c41e7855bb45ccce953e0eb0e44f5836d8d54ad843f9944"}, + {file = "fastavro-1.9.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:53d838e31457db8bf44460c244543f75ed307935d5fc1d93bc631cc7caef2082"}, + {file = "fastavro-1.9.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:07b6288e8681eede16ff077632c47395d4925c2f51545cd7a60f194454db2211"}, + {file = "fastavro-1.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:ef08cf247fdfd61286ac0c41854f7194f2ad05088066a756423d7299b688d975"}, + {file = "fastavro-1.9.5-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:c52d7bb69f617c90935a3e56feb2c34d4276819a5c477c466c6c08c224a10409"}, + {file = "fastavro-1.9.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85e05969956003df8fa4491614bc62fe40cec59e94d06e8aaa8d8256ee3aab82"}, + {file = "fastavro-1.9.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06e6df8527493a9f0d9a8778df82bab8b1aa6d80d1b004e5aec0a31dc4dc501c"}, + {file = "fastavro-1.9.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:27820da3b17bc01cebb6d1687c9d7254b16d149ef458871aaa207ed8950f3ae6"}, + {file = "fastavro-1.9.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:195a5b8e33eb89a1a9b63fa9dce7a77d41b3b0cd785bac6044df619f120361a2"}, + {file = "fastavro-1.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:be612c109efb727bfd36d4d7ed28eb8e0506617b7dbe746463ebbf81e85eaa6b"}, + {file = "fastavro-1.9.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b133456c8975ec7d2a99e16a7e68e896e45c821b852675eac4ee25364b999c14"}, + {file = "fastavro-1.9.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf586373c3d1748cac849395aad70c198ee39295f92e7c22c75757b5c0300fbe"}, + {file = "fastavro-1.9.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:724ef192bc9c55d5b4c7df007f56a46a21809463499856349d4580a55e2b914c"}, + {file = "fastavro-1.9.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bfd11fe355a8f9c0416803afac298960eb4c603a23b1c74ff9c1d3e673ea7185"}, + {file = "fastavro-1.9.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9827d1654d7bcb118ef5efd3e5b2c9ab2a48d44dac5e8c6a2327bc3ac3caa828"}, + {file = "fastavro-1.9.5-cp312-cp312-win_amd64.whl", hash = "sha256:d84b69dca296667e6137ae7c9a96d060123adbc0c00532cc47012b64d38b47e9"}, + {file = "fastavro-1.9.5-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:fb744e9de40fb1dc75354098c8db7da7636cba50a40f7bef3b3fb20f8d189d88"}, + {file = "fastavro-1.9.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:240df8bacd13ff5487f2465604c007d686a566df5cbc01d0550684eaf8ff014a"}, + {file = "fastavro-1.9.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3bb35c25bbc3904e1c02333bc1ae0173e0a44aa37a8e95d07e681601246e1f1"}, + {file = "fastavro-1.9.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b47a54a9700de3eabefd36dabfb237808acae47bc873cada6be6990ef6b165aa"}, + {file = "fastavro-1.9.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:48c7b5e6d2f3bf7917af301c275b05c5be3dd40bb04e80979c9e7a2ab31a00d1"}, + {file = "fastavro-1.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:05d13f98d4e325be40387e27da9bd60239968862fe12769258225c62ec906f04"}, + {file = "fastavro-1.9.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5b47948eb196263f6111bf34e1cd08d55529d4ed46eb50c1bc8c7c30a8d18868"}, + {file = "fastavro-1.9.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85b7a66ad521298ad9373dfe1897a6ccfc38feab54a47b97922e213ae5ad8870"}, + {file = "fastavro-1.9.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44cb154f863ad80e41aea72a709b12e1533b8728c89b9b1348af91a6154ab2f5"}, + {file = "fastavro-1.9.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b5f7f2b1fe21231fd01f1a2a90e714ae267fe633cd7ce930c0aea33d1c9f4901"}, + {file = "fastavro-1.9.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88fbbe16c61d90a89d78baeb5a34dc1c63a27b115adccdbd6b1fb6f787deacf2"}, + {file = "fastavro-1.9.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:753f5eedeb5ca86004e23a9ce9b41c5f25eb64a876f95edcc33558090a7f3e4b"}, + {file = "fastavro-1.9.5.tar.gz", hash = "sha256:6419ebf45f88132a9945c51fe555d4f10bb97c236288ed01894f957c6f914553"}, ] [package.extras] @@ -758,6 +945,92 @@ lz4 = ["lz4"] snappy = ["cramjam"] zstandard = ["zstandard"] +[[package]] +name = "frozenlist" +version = "1.4.1" +description = "A list-like structure which implements collections.abc.MutableSequence" +optional = false +python-versions = ">=3.8" +files = [ + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, + {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, + {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, + {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, + {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, + {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, + {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, + {file = 
"frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, + {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, + {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, + {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = "sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, + {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, + {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, + {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, + {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, + {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, + {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826"}, + {file = "frozenlist-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a"}, + {file = "frozenlist-1.4.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09"}, + {file = "frozenlist-1.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7"}, + {file = "frozenlist-1.4.1-cp38-cp38-win32.whl", hash = "sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497"}, + {file = "frozenlist-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d"}, + {file = "frozenlist-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897"}, + {file = "frozenlist-1.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9"}, + {file = "frozenlist-1.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6"}, + {file = "frozenlist-1.4.1-cp39-cp39-win32.whl", hash = "sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932"}, + {file = "frozenlist-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0"}, + {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, + {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +] + [[package]] name = "genson" version = "1.3.0" @@ -769,15 +1042,52 @@ files = [ {file = "genson-1.3.0.tar.gz", hash = "sha256:e02db9ac2e3fd29e65b5286f7135762e2cd8a986537c075b06fc5f1517308e37"}, ] +[[package]] +name = "google-api" +version = "0.1.12" +description = "Google API Client" +optional = false +python-versions = "*" +files = [ + {file = "google_api-0.1.12-py2.py3-none-any.whl", hash = "sha256:618f9f2076482a128c408867b5398b291938fe8e653ed7f8ed58fce5042f0c75"}, + {file = "google_api-0.1.12.tar.gz", hash = "sha256:5611c87cdfc6b72927a5e2ea9299ddd6f3a206e29a342b86d3ff3ecc351c30a3"}, +] + +[package.dependencies] +appier = "*" + +[[package]] +name = "google-api-core" +version = "2.19.1" +description = "Google API client core library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google-api-core-2.19.1.tar.gz", hash = "sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd"}, + {file = "google_api_core-2.19.1-py3-none-any.whl", hash = "sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125"}, +] + +[package.dependencies] +google-auth = ">=2.14.1,<3.0.dev0" +googleapis-common-protos = ">=1.56.2,<2.0.dev0" +proto-plus = ">=1.22.3,<2.0.0dev" +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" +requests = ">=2.18.0,<3.0.0.dev0" + +[package.extras] 
+grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio-status (>=1.49.1,<2.0.dev0)"] +grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] +grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] + [[package]] name = "google-auth" -version = "2.30.0" +version = "2.32.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google-auth-2.30.0.tar.gz", hash = "sha256:ab630a1320f6720909ad76a7dbdb6841cdf5c66b328d690027e4867bdfb16688"}, - {file = "google_auth-2.30.0-py2.py3-none-any.whl", hash = "sha256:8df7da660f62757388b8a7f249df13549b3373f24388cb5d2f1dd91cc18180b5"}, + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, ] [package.dependencies] @@ -794,13 +1104,13 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "googleapis-common-protos" -version = "1.65.0" +version = "1.63.2" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" files = [ - {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, - {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, + {file = "googleapis-common-protos-1.63.2.tar.gz", hash = "sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87"}, + {file = "googleapis_common_protos-1.63.2-py2.py3-none-any.whl", hash = "sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945"}, ] [package.dependencies] @@ -880,23 +1190,6 @@ files = [ docs = ["Sphinx", "furo"] test = ["objgraph", "psutil"] -[[package]] -name = "grpc-interceptor" -version = "0.15.4" -description = "Simplifies gRPC interceptors" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "grpc-interceptor-0.15.4.tar.gz", hash = "sha256:1f45c0bcb58b6f332f37c637632247c9b02bc6af0fdceb7ba7ce8d2ebbfb0926"}, - {file = "grpc_interceptor-0.15.4-py3-none-any.whl", hash = "sha256:0035f33228693ed3767ee49d937bac424318db173fef4d2d0170b3215f254d9d"}, -] - -[package.dependencies] -grpcio = ">=1.49.1,<2.0.0" - -[package.extras] -testing = ["protobuf (>=4.21.9)"] - [[package]] name = "grpcio" version = "1.64.1" @@ -1071,6 +1364,40 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = 
">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -1150,6 +1477,31 @@ files = [ {file = "json5-0.9.25.tar.gz", hash = "sha256:548e41b9be043f9426776f05df8635a00fe06104ea51ed24b67f908856e151ae"}, ] +[[package]] +name = "kr8s" +version = "0.17.0" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.0-py3-none-any.whl", hash = "sha256:7307bca7b125cdc8c41ec9d7a0b3b1273c4c76b10b992a054aaf1e38309f1445"}, + {file = "kr8s-0.17.0.tar.gz", hash = "sha256:c2afe40461f1b1c853dcde755a64fe4837e05b931c6effbfff12ab32ae224445"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1177,6 +1529,25 @@ websocket-client = ">=0.32.0,<0.40.0 || >0.40.0,<0.41.dev0 || >=0.43.dev0" [package.extras] adal = ["adal (>=1.0.2)"] +[[package]] +name = "kubernetes-asyncio" +version = "30.1.1" +description = "Kubernetes asynchronous python client" +optional = false +python-versions = "*" +files = [ + {file = "kubernetes_asyncio-30.1.1-py3-none-any.whl", hash = "sha256:3bb40d906ba37f5553bbf0ee9b69947bf14b93c481ed69e2a5ab02aa6ded33d7"}, + {file = "kubernetes_asyncio-30.1.1.tar.gz", hash = "sha256:7523f8650bedb0c9cf5264f2b043ee94fab9b0d29a142c63d59d435bd9df66d7"}, +] + +[package.dependencies] +aiohttp = ">=3.9.0,<4.0.0" +certifi = ">=14.05.14" +python-dateutil = ">=2.5.3" +pyyaml = ">=3.12" +six = ">=1.9.0" +urllib3 = ">=1.24.2" + [[package]] name = "mako" version = "1.3.5" @@ -1265,6 +1636,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.21.3" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes." +optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.21.3-py3-none-any.whl", hash = "sha256:86ce7fb914aa865001a4b2092c4c2872d13bc347f3d42673272cabfdbad386f1"}, + {file = "marshmallow-3.21.3.tar.gz", hash = "sha256:4f57c5e050a54d66361e826f94fba213eb10b67b2fdb02c3e0343ce207ba1662"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==0.7.16)", "autodocsumm (==0.2.12)", "sphinx (==7.3.7)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1475,24 +1865,41 @@ files = [ prometheus-client = ">=0.7.1,<0.8.0" sanic = ">=22.0.0" +[[package]] +name = "proto-plus" +version = "1.24.0" +description = "Beautiful, Pythonic protocol buffers." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "proto-plus-1.24.0.tar.gz", hash = "sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445"}, + {file = "proto_plus-1.24.0-py3-none-any.whl", hash = "sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12"}, +] + +[package.dependencies] +protobuf = ">=3.19.0,<6.0.0dev" + +[package.extras] +testing = ["google-api-core (>=1.31.5)"] + [[package]] name = "protobuf" -version = "5.27.1" +version = "5.27.2" description = "" optional = false python-versions = ">=3.8" files = [ - {file = "protobuf-5.27.1-cp310-abi3-win32.whl", hash = "sha256:3adc15ec0ff35c5b2d0992f9345b04a540c1e73bfee3ff1643db43cc1d734333"}, - {file = "protobuf-5.27.1-cp310-abi3-win_amd64.whl", hash = "sha256:25236b69ab4ce1bec413fd4b68a15ef8141794427e0b4dc173e9d5d9dffc3bcd"}, - {file = "protobuf-5.27.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4e38fc29d7df32e01a41cf118b5a968b1efd46b9c41ff515234e794011c78b17"}, - {file = "protobuf-5.27.1-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:917ed03c3eb8a2d51c3496359f5b53b4e4b7e40edfbdd3d3f34336e0eef6825a"}, - {file = "protobuf-5.27.1-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:ee52874a9e69a30271649be88ecbe69d374232e8fd0b4e4b0aaaa87f429f1631"}, - {file = "protobuf-5.27.1-cp38-cp38-win32.whl", hash = "sha256:7a97b9c5aed86b9ca289eb5148df6c208ab5bb6906930590961e08f097258107"}, - {file = "protobuf-5.27.1-cp38-cp38-win_amd64.whl", hash = "sha256:f6abd0f69968792da7460d3c2cfa7d94fd74e1c21df321eb6345b963f9ec3d8d"}, - {file = "protobuf-5.27.1-cp39-cp39-win32.whl", hash = "sha256:dfddb7537f789002cc4eb00752c92e67885badcc7005566f2c5de9d969d3282d"}, - {file = "protobuf-5.27.1-cp39-cp39-win_amd64.whl", hash = "sha256:39309898b912ca6febb0084ea912e976482834f401be35840a008da12d189340"}, - {file = "protobuf-5.27.1-py3-none-any.whl", hash = "sha256:4ac7249a1530a2ed50e24201d6630125ced04b30619262f06224616e0030b6cf"}, - {file = "protobuf-5.27.1.tar.gz", hash = "sha256:df5e5b8e39b7d1c25b186ffdf9f44f40f810bbcc9d2b71d9d3156fee5a9adf15"}, + {file = "protobuf-5.27.2-cp310-abi3-win32.whl", hash = "sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38"}, + {file = "protobuf-5.27.2-cp310-abi3-win_amd64.whl", hash = "sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505"}, + {file = "protobuf-5.27.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b"}, + {file = "protobuf-5.27.2-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e"}, + {file = "protobuf-5.27.2-cp38-cp38-win32.whl", hash = "sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863"}, + {file = "protobuf-5.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6"}, + {file = "protobuf-5.27.2-cp39-cp39-win32.whl", hash = "sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca"}, + {file = "protobuf-5.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce"}, + {file = "protobuf-5.27.2-py3-none-any.whl", hash = "sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470"}, + {file = "protobuf-5.27.2.tar.gz", hash = 
"sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714"}, ] [[package]] @@ -1809,6 +2216,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = "sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1843,6 +2285,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.1.1" +description = 
"JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.1.1-py3-none-any.whl", hash = "sha256:43f2622b7aaaf4f45dd873e80cfd181058503e08ffdeac5218135f3a97bd0aec"}, + {file = "python_jsonpath-1.1.1.tar.gz", hash = "sha256:d2944e1f7a1d6c8fa958724f9570b8f04a4e00ab6bf1e4733346ab8dcef1f74f"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -1880,7 +2333,6 @@ files = [ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, @@ -1888,16 +2340,8 @@ files = [ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, @@ -1914,7 +2358,6 @@ files = [ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, @@ -1922,7 +2365,6 @@ files = [ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, @@ -2037,24 +2479,24 @@ python-versions = ">=3.6" files = [ {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b42169467c42b692c19cf539c38d4602069d8c1505e97b86387fcf7afb766e1d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:07238db9cbdf8fc1e9de2489a4f68474e70dffcb32232db7c08fa61ca0c7c462"}, + {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d92f81886165cb14d7b067ef37e142256f1c6a90a65cd156b063a43da1708cfd"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fff3573c2db359f091e1589c3d7c5fc2f86f5bdb6f24252c2d8e539d4e45f412"}, - {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-manylinux_2_24_aarch64.whl", hash = "sha256:aa2267c6a303eb483de8d02db2871afb5c5fc15618d894300b88958f729ad74f"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:840f0c7f194986a63d2c2465ca63af8ccbbc90ab1c6001b1978f05119b5e7334"}, {file = 
"ruamel.yaml.clib-0.2.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:024cfe1fc7c7f4e1aff4a81e718109e13409767e4f871443cbff3dba3578203d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win32.whl", hash = "sha256:c69212f63169ec1cfc9bb44723bf2917cbbd8f6191a00ef3410f5a7fe300722d"}, {file = "ruamel.yaml.clib-0.2.8-cp310-cp310-win_amd64.whl", hash = "sha256:cabddb8d8ead485e255fe80429f833172b4cadf99274db39abc080e068cbcc31"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:bef08cd86169d9eafb3ccb0a39edb11d8e25f3dae2b28f5c52fd997521133069"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:b16420e621d26fdfa949a8b4b47ade8810c56002f5389970db4ddda51dbff248"}, + {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:b5edda50e5e9e15e54a6a8a0070302b00c518a9d32accc2346ad6c984aacd279"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:25c515e350e5b739842fc3228d662413ef28f295791af5e5110b543cf0b57d9b"}, - {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-manylinux_2_24_aarch64.whl", hash = "sha256:1707814f0d9791df063f8c19bb51b0d1278b8e9a2353abbb676c2f685dee6afe"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:46d378daaac94f454b3a0e3d8d78cafd78a026b1d71443f4966c696b48a6d899"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:09b055c05697b38ecacb7ac50bdab2240bfca1a0c4872b0fd309bb07dc9aa3a9"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win32.whl", hash = "sha256:53a300ed9cea38cf5a2a9b069058137c2ca1ce658a874b79baceb8f892f915a7"}, {file = "ruamel.yaml.clib-0.2.8-cp311-cp311-win_amd64.whl", hash = "sha256:c2a72e9109ea74e511e29032f3b670835f8a59bbdc9ce692c5b4ed91ccf1eedb"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, + {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:7048c338b6c86627afb27faecf418768acb6331fc24cfa56c93e8c9780f815fa"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, @@ -2062,7 +2504,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a5aa27bad2bb83670b71683aae140a1f52b0857a2deff56ad3f6c13a017a26ed"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c58ecd827313af6864893e7af0a3bb85fd529f862b6adbefe14643947cfe2942"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-macosx_12_0_arm64.whl", hash = 
"sha256:f481f16baec5290e45aebdc2a5168ebc6d35189ae6fea7a58787613a25f6e875"}, - {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_24_aarch64.whl", hash = "sha256:77159f5d5b5c14f7c34073862a6b7d34944075d9f93e681638f6d753606c6ce6"}, + {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3fcc54cb0c8b811ff66082de1680b4b14cf8a81dce0d4fbf665c2265a81e07a1"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7f67a1ee819dc4562d444bbafb135832b0b909f81cc90f7aa00260968c9ca1b3"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4ecbf9c3e19f9562c7fdd462e8d18dd902a47ca046a2e64dba80699f0b6c09b7"}, {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:87ea5ff66d8064301a154b3933ae406b0863402a799b16e4a1d24d9fbbcbe0d3"}, @@ -2070,7 +2512,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp37-cp37m-win_amd64.whl", hash = "sha256:3f215c5daf6a9d7bbed4a0a4f760f3113b10e82ff4c5c44bec20a68c8014f675"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b617618914cb00bf5c34d4357c37aa15183fa229b24767259657746c9077615"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a6a9ffd280b71ad062eae53ac1659ad86a17f59a0fdc7699fd9be40525153337"}, - {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_24_aarch64.whl", hash = "sha256:305889baa4043a09e5b76f8e2a51d4ffba44259f6b4c72dec8ca56207d9c6fe1"}, + {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:665f58bfd29b167039f714c6998178d27ccd83984084c286110ef26b230f259f"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:700e4ebb569e59e16a976857c8798aee258dceac7c7d6b50cab63e080058df91"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2b4c44b60eadec492926a7270abb100ef9f72798e18743939bdbf037aab8c28"}, {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e79e5db08739731b0ce4850bed599235d601701d5694c36570a99a0c5ca41a9d"}, @@ -2078,7 +2520,7 @@ files = [ {file = "ruamel.yaml.clib-0.2.8-cp38-cp38-win_amd64.whl", hash = "sha256:56f4252222c067b4ce51ae12cbac231bce32aee1d33fbfc9d17e5b8d6966c312"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:03d1162b6d1df1caa3a4bd27aa51ce17c9afc2046c31b0ad60a0a96ec22f8001"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba64af9fa9cebe325a62fa398760f5c7206b215201b0ec825005f1b18b9bccf"}, - {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_24_aarch64.whl", hash = "sha256:a1a45e0bb052edf6a1d3a93baef85319733a888363938e1fc9924cb00c8df24c"}, + {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:9eb5dee2772b0f704ca2e45b1713e4e5198c18f515b52743576d196348f374d3"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:da09ad1c359a728e112d60116f626cc9f29730ff3e0e7db72b9a2dbc2e4beed5"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:184565012b60405d93838167f425713180b949e9d8dd0bbc7b49f074407c5a8b"}, {file = "ruamel.yaml.clib-0.2.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a75879bacf2c987c003368cf14bed0ffe99e8e85acfa6c0bfffc21a090f16880"}, @@ -2356,6 +2798,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python 
Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2648,7 +3101,141 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.3-py3-none-any.whl", hash = "sha256:fc9645dc43e03e4d630d23143a04a7f947a9a3b5727cd535fdfe155a17cc48c8"}, + {file = "werkzeug-3.0.3.tar.gz", hash = "sha256:097e5bfda9f0aba8da6b8545146def481d06aa7d3266e7448e2cccf67dd8bd18"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + +[[package]] +name = "yarl" +version = "1.9.4" +description = "Yet another URL library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, + {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, + {file = "yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = 
"sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, + {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, + {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, + {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, + {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, + {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, + {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, + {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, + {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, + {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, + {file = 
"yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, + {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, + {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, + {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, + {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, + {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, + {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, + {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, + {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, + {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, + {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, + {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, + {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, + {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, + {file = "yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, + {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, + {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, + {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, + {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, + {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, + {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, + {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, +] + +[package.dependencies] +idna = ">=2.0" +multidict = ">=4.0" + [metadata] lock-version = "2.0" python-versions = "^3.12" -content-hash = "a30121364668554c09657a0659da85a871d7da09c87c76e4026d51b2659dce2b" +content-hash = "45ca91ca0163e88901c9c6e46ac554b7f4bc1398aafa30fd85120a8c7a18f6d2" diff --git a/projects/renku_data_service/pyproject.toml b/projects/renku_data_service/pyproject.toml index 4b21c1001..012c77ad8 100644 --- a/projects/renku_data_service/pyproject.toml +++ b/projects/renku_data_service/pyproject.toml @@ -29,6 +29,8 @@ packages = [ { include = "renku_data_services/storage", from = "../../components" }, { include = "renku_data_services/users", from = "../../components" }, { include = "renku_data_services/utils", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, + { include = "renku_data_services/data_connectors", from = "../../components" }, # Note: poetry poly does not detect the migrations as dependencies, but they are. Don't remove these! 
{ include = "renku_data_services/migrations", from = "../../components" }, ] @@ -55,11 +57,18 @@ redis = "^5.0.8" dataclasses-avroschema = "^0.63.0" undictify = "^0.11.3" prometheus-sanic = "^3.0.0" -sentry-sdk = {version = "^2.14.0", extras = ["sanic"]} -authzed = "^0.18.3" +sentry-sdk = {version = "^2.6.0", extras = ["sanic"]} +authzed = "^0.15.0" +cryptography = "^42.0.5" +kubernetes-asyncio = "^30.1.0" +marshmallow = "^3.21.3" +escapism = "^1.0.1" +kr8s = "^0.17.0" +werkzeug = "^3.0.3" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +toml = "^0.10.2" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/projects/secrets_storage/Dockerfile b/projects/secrets_storage/Dockerfile index c9bcecb91..a5c86ebf5 100644 --- a/projects/secrets_storage/Dockerfile +++ b/projects/secrets_storage/Dockerfile @@ -27,7 +27,7 @@ RUN env/bin/pip --no-cache-dir install projects/secrets_storage/dist/*.whl FROM python:3.12-slim-bookworm ARG USER_UID=1000 ARG USER_GID=$USER_UID -ENV prometheus_multiproc_dir=/prometheus +ENV prometheus_multiproc_dir=/prometheus ENV PROMETHEUS_MULTIPROC_DIR=/prometheus RUN mkdir /prometheus && chown $USER_UID:$USER_GID /prometheus RUN apt-get update && apt-get install -y \ diff --git a/projects/secrets_storage/poetry.lock b/projects/secrets_storage/poetry.lock index 985b29227..f34d480e0 100644 --- a/projects/secrets_storage/poetry.lock +++ b/projects/secrets_storage/poetry.lock @@ -92,6 +92,20 @@ files = [ [package.extras] test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] +[[package]] +name = "asyncache" +version = "0.3.1" +description = "Helpers to use cachetools with async code." +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "asyncache-0.3.1-py3-none-any.whl", hash = "sha256:ef20a1024d265090dd1e0785c961cf98b9c32cc7d9478973dcf25ac1b80011f5"}, + {file = "asyncache-0.3.1.tar.gz", hash = "sha256:9a1e60a75668e794657489bdea6540ee7e3259c483517b934670db7600bf5035"}, +] + +[package.dependencies] +cachetools = ">=5.2.0,<6.0.0" + [[package]] name = "asyncpg" version = "0.29.0" @@ -657,6 +671,17 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" +[[package]] +name = "escapism" +version = "1.0.1" +description = "Simple, generic API for escaping strings." 
+optional = false +python-versions = "*" +files = [ + {file = "escapism-1.0.1-py2.py3-none-any.whl", hash = "sha256:d28f19edc3cb1ffc36fa238956ecc068695477e748f57157c6dde00a6b77f229"}, + {file = "escapism-1.0.1.tar.gz", hash = "sha256:73256bdfb4f22230f0428fc6efecee61cdc4fad531b6f98b849cb9c80711e4ec"}, +] + [[package]] name = "factory-boy" version = "3.3.0" @@ -1070,6 +1095,23 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-ws" +version = "0.6.0" +description = "WebSockets support for HTTPX" +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx_ws-0.6.0-py3-none-any.whl", hash = "sha256:437cfca94519a4e6ae06eb5573192df6c0da85c22b1a19cc1ea0b02b05a51d25"}, + {file = "httpx_ws-0.6.0.tar.gz", hash = "sha256:60218f531fb474a2143af38568f4b7d94ba356780973443365c8e2c87882bb8c"}, +] + +[package.dependencies] +anyio = ">=4" +httpcore = ">=1.0.4" +httpx = ">=0.23.1" +wsproto = "*" + [[package]] name = "idna" version = "3.7" @@ -1138,6 +1197,31 @@ MarkupSafe = ">=2.0" [package.extras] i18n = ["Babel (>=2.7)"] +[[package]] +name = "kr8s" +version = "0.17.2" +description = "A Kubernetes API library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "kr8s-0.17.2-py3-none-any.whl", hash = "sha256:5beba0ccf08c7a2305c0fa8f85fa8d2fe7d3f265872f718489e1bea3162fa91b"}, + {file = "kr8s-0.17.2.tar.gz", hash = "sha256:536d08c3f701365e6ac5ce42c0e8313aa6e6740f92b7077f28209e892af046ab"}, +] + +[package.dependencies] +anyio = ">=3.7.0" +asyncache = ">=0.3.1" +cryptography = ">=35" +httpx = ">=0.24.1" +httpx-ws = ">=0.5.2" +python-box = ">=7.0.1" +python-jsonpath = ">=0.7.1" +pyyaml = ">=6.0" + +[package.extras] +docs = ["furo (>=2023.3.27)", "myst-parser (>=1.0.0)", "sphinx (>=5.3.0)", "sphinx-autoapi (>=2.1.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx-copybutton (>=0.5.1)", "sphinx-design (>=0.3.0)", "sphinxcontrib-mermaid (>=0.8.1)"] +test = ["kubernetes (>=26.1.0)", "kubernetes-asyncio (>=24.2.3)", "kubernetes-validate (>=1.28.0)", "lightkube (>=0.13.0)", "pykube-ng (>=23.6.0)", "pytest (>=7.2.2)", "pytest-asyncio (>=0.20.3)", "pytest-cov (>=4.0.0)", "pytest-kind (>=22.11.1)", "pytest-rerunfailures (>=11.1.2)", "pytest-timeout (>=2.1.0)", "trio (>=0.22.0)", "types-pyyaml (>=6.0)"] + [[package]] name = "kubernetes" version = "31.0.0" @@ -1253,6 +1337,25 @@ files = [ {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, ] +[[package]] +name = "marshmallow" +version = "3.22.0" +description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
+optional = false +python-versions = ">=3.8" +files = [ + {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, + {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, +] + +[package.dependencies] +packaging = ">=17.0" + +[package.extras] +dev = ["marshmallow[tests]", "pre-commit (>=3.5,<4.0)", "tox"] +docs = ["alabaster (==1.0.0)", "autodocsumm (==0.2.13)", "sphinx (==8.0.2)", "sphinx-issues (==4.1.0)", "sphinx-version-warning (==1.1.2)"] +tests = ["pytest", "pytz", "simplejson"] + [[package]] name = "multidict" version = "6.0.5" @@ -1797,6 +1900,41 @@ dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pyte docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] +[[package]] +name = "python-box" +version = "7.2.0" +description = "Advanced Python dictionaries with dot notation access" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_box-7.2.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:6bdeec791e25258351388b3029a3ec5da302bb9ed3be175493c43cdc6c47f5e3"}, + {file = "python_box-7.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c449f7b3756a71479fa9c61a86e344ac00ed782a66d7662590f0afa294249d18"}, + {file = "python_box-7.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:6b0d61f182d394106d963232854e495b51edc178faa5316a797be1178212d7e0"}, + {file = "python_box-7.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e2d752de8c1204255bf7b0c814c59ef48293c187a7e9fdcd2fefa28024b72032"}, + {file = "python_box-7.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a6c35ea356a386077935958a5debcd5b229b9a1b3b26287a52dfe1a7e65d99"}, + {file = "python_box-7.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:32ed58ec4d9e5475efe69f9c7d773dfea90a6a01979e776da93fd2b0a5d04429"}, + {file = "python_box-7.2.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:2a2d664c6a27f7515469b6f1e461935a2038ee130b7d194b4b4db4e85d363618"}, + {file = "python_box-7.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8a5a7365db1aaf600d3e8a2747fcf6833beb5d45439a54318548f02e302e3ec"}, + {file = "python_box-7.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:739f827056ea148cbea3122d4617c994e829b420b1331183d968b175304e3a4f"}, + {file = "python_box-7.2.0-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:2617ef3c3d199f55f63c908f540a4dc14ced9b18533a879e6171c94a6a436f23"}, + {file = "python_box-7.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd866bed03087b1d8340014da8c3aaae19135767580641df1b4ae6fff6ac0aa"}, + {file = "python_box-7.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:9681f059e7e92bdf20782cd9ea6e533d4711fc7b8c57a462922a025d46add4d0"}, + {file = "python_box-7.2.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:6b59b1e2741c9ceecdf5a5bd9b90502c24650e609cd824d434fed3b6f302b7bb"}, + {file = "python_box-7.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23fae825d809ae7520fdeac88bb52be55a3b63992120a00e381783669edf589"}, + {file = "python_box-7.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:573b1abdcb7bd745fa404444f060ee62fc35a74f067181e55dcb43cfe92f2827"}, + {file = "python_box-7.2.0-py3-none-any.whl", hash = "sha256:a3c90832dd772cb0197fdb5bc06123b6e1b846899a1b53d9c39450d27a584829"}, + {file = "python_box-7.2.0.tar.gz", hash = 
"sha256:551af20bdab3a60a2a21e3435120453c4ca32f7393787c3a5036e1d9fc6a0ede"}, +] + +[package.extras] +all = ["msgpack", "ruamel.yaml (>=0.17)", "toml"] +msgpack = ["msgpack"] +pyyaml = ["PyYAML"] +ruamel-yaml = ["ruamel.yaml (>=0.17)"] +toml = ["toml"] +tomli = ["tomli", "tomli-w"] +yaml = ["ruamel.yaml (>=0.17)"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1831,6 +1969,17 @@ autocompletion = ["argcomplete (>=1.10.0,<3)"] graphql = ["gql[httpx] (>=3.5.0,<4)"] yaml = ["PyYaml (>=6.0.1)"] +[[package]] +name = "python-jsonpath" +version = "1.2.0" +description = "JSONPath, JSON Pointer and JSON Patch for Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "python_jsonpath-1.2.0-py3-none-any.whl", hash = "sha256:3172c7b87098fced1ed84bd3492bd1a19ef1ad41d4f5b8a3e9a147c750ac08b3"}, + {file = "python_jsonpath-1.2.0.tar.gz", hash = "sha256:a29a84ec3ac38e5dcaa62ac2a215de72c4eb60cb1303e10700da980cf7873775"}, +] + [[package]] name = "python-ulid" version = "2.7.0" @@ -2333,6 +2482,17 @@ files = [ doc = ["reno", "sphinx"] test = ["pytest", "tornado (>=4.5)", "typeguard"] +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + [[package]] name = "tracerite" version = "1.1.1" @@ -2680,6 +2840,37 @@ files = [ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] +[[package]] +name = "werkzeug" +version = "3.0.4" +description = "The comprehensive WSGI web application library." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.4-py3-none-any.whl", hash = "sha256:02c9eb92b7d6c06f31a782811505d2157837cea66aaede3e217c7c27c039476c"}, + {file = "werkzeug-3.0.4.tar.gz", hash = "sha256:34f2371506b250df4d4f84bfe7b0921e4762525762bbd936614909fe25cd7306"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wsproto" +version = "1.2.0" +description = "WebSockets state-machine based protocol implementation" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "wsproto-1.2.0-py3-none-any.whl", hash = "sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736"}, + {file = "wsproto-1.2.0.tar.gz", hash = "sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065"}, +] + +[package.dependencies] +h11 = ">=0.9.0,<1" + [metadata] lock-version = "2.0" python-versions = "^3.12" diff --git a/projects/secrets_storage/pyproject.toml b/projects/secrets_storage/pyproject.toml index 3f442de7c..57be935ff 100644 --- a/projects/secrets_storage/pyproject.toml +++ b/projects/secrets_storage/pyproject.toml @@ -31,6 +31,8 @@ packages = [ { include = "renku_data_services/storage", from = "../../components" }, { include = "renku_data_services/users", from = "../../components" }, { include = "renku_data_services/utils", from = "../../components" }, + { include = "renku_data_services/notebooks", from = "../../components" }, + { include = "renku_data_services/data_connectors", from = "../../components" }, ] [tool.poetry.dependencies] @@ -62,6 +64,11 @@ authzed = "^0.18.3" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } aiofile = "^3.8.8" +escapism = "^1.0.1" +kr8s = "^0.17.2" +marshmallow = "^3.22.0" +toml = "^0.10.2" +werkzeug = "^3.0.4" [tool.poetry.group.dev.dependencies] pyavro-gen = "^0.3.3" diff --git a/pyproject.toml b/pyproject.toml index cb0c13403..281b664dd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ packages = [ { include = "renku_data_services/repositories", from = "components" }, { include = "renku_data_services/notebooks", from = "components" }, { include = "renku_data_services/platform", from = "components" }, + { include = "renku_data_services/data_connectors", from = "components" }, ] [tool.poetry.dependencies] @@ -61,11 +62,14 @@ prometheus_client = "^0.7.1" kubernetes-asyncio = "^31.1.0" marshmallow = "^3.21.3" escapism = "^1.0.1" -sentry-sdk = { version = "^2.14.0", extras = ["sanic"] } -authzed = "^0.18.3" -cryptography = "^43.0.1" +kr8s = "^0.17.0" +werkzeug = "^3.0.3" +sentry-sdk = { version = "^2.7.1", extras = ["sanic"] } +authzed = "^0.16.0" +cryptography = "^42.0.8" # see https://github.com/sanic-org/sanic/issues/2828 for setuptools dependency, remove when not needed anymore setuptools = { version = "^75.1.0" } +toml = "^0.10.2" aiofiles = "^24.1.0" [tool.poetry.group.dev.dependencies] @@ -87,8 +91,10 @@ fakeredis = "^2.24.1" ruff = "^0.6.7" debugpy = "^1.8.2" pytest-xdist = { version = "^3.5.0", extras = ["psutil"] } -types-requests = "^2.32.0.20240914" +types-requests = "^2.32.0.20240622" +types-toml = "^0.10.8.20240310" types-aiofiles = "^24.1.0.20240626" +pytest-mock = "^3.14.0" [build-system] requires = ["poetry-core"] @@ -99,7 +105,11 @@ line-length = 120 target-version = "py311" output-format = "full" include = ["*.py", "*.pyi"] -exclude = ["*/avro_models/*"] +exclude = [ + "*/avro_models/*", + 
"components/renku_data_services/notebooks/cr_amalthea_session.py", + "components/renku_data_services/notebooks/cr_jupyter_server.py", +] [tool.ruff.format] exclude = ["apispec.py"] @@ -138,6 +148,7 @@ ignore = [ "test/*" = ["D"] "*/versions/*" = ["D", "E", "W"] "apispec.py" = ["D", "E", "W", "I", "UP"] +"components/renku_data_services/notebooks/crs.py" = ["F401"] [tool.ruff.lint.isort] known-first-party = ["renku_data_services"] @@ -147,7 +158,12 @@ convention = "google" [tool.bandit] skips = ["B101", "B603", "B607", "B404"] -exclude_dirs = ["test", ".devcontainer"] +exclude_dirs = [ + "test", + ".devcontainer", + "components/renku_data_services/notebooks/cr_jupyter_server.py", + "components/renku_data_services/notebooks/cr_amalthea_session.py", +] [tool.pytest.ini_options] addopts = "--cov components/ --cov bases/ --cov-report=term-missing -v" @@ -191,6 +207,7 @@ disallow_untyped_defs = true module = [ "renku_data_services.crc.apispec", "renku_data_services.connected_services.apispec", + "renku_data_services.data_connectors.apispec", "renku_data_services.storage.apispec", "renku_data_services.project.apispec", "renku_data_services.repositories.apispec", @@ -200,6 +217,8 @@ module = [ "renku_data_services.data_api.error_handler", "renku_data_services.namespace.apispec", "renku_data_services.notebooks.apispec", + "renku_data_services.notebooks.cr_amalthea_session", + "renku_data_services.notebooks.cr_jupyter_server", "renku_data_services.platform.apispec", ] ignore_errors = true @@ -224,6 +243,7 @@ module = [ "undictify.*", "urllib3.*", "escapism.*", + "kr8s.*", ] ignore_missing_imports = true diff --git a/server_defaults.json b/server_defaults.json new file mode 100644 index 000000000..1050fbedd --- /dev/null +++ b/server_defaults.json @@ -0,0 +1,8 @@ +{ + "defaultUrl": "/lab", + "cpu_request": 0.5, + "mem_request": "1G", + "disk_request": "1G", + "gpu_request": 0, + "lfs_auto_fetch": false +} diff --git a/server_options.json b/server_options.json new file mode 100644 index 000000000..8a3b58692 --- /dev/null +++ b/server_options.json @@ -0,0 +1,56 @@ +{ + "defaultUrl": { + "order": 1, + "displayName": "Default Environment", + "type": "enum", + "default": "/lab", + "options": [ + "/lab" + ] + }, + "cpu_request": { + "order": 2, + "displayName": "Number of CPUs", + "type": "enum", + "default": 0.5, + "options": [ + 0.5, + 1 + ] + }, + "mem_request": { + "order": 3, + "displayName": "Amount of Memory", + "type": "enum", + "default": "1G", + "options": [ + "1G", + "2G" + ] + }, + "disk_request": { + "order": 4, + "displayName": "Amount of Storage", + "type": "enum", + "default": "1G", + "options": [ + "1G", + "10G" + ] + }, + "gpu_request": { + "order": 5, + "displayName": "Number of GPUs", + "type": "enum", + "default": 0, + "options": [ + 0 + ] + }, + "lfs_auto_fetch": { + "order": 6, + "displayName": "Automatically fetch LFS data", + "type": "boolean", + "default": false + } +} diff --git a/test/bases/renku_data_services/data_api/conftest.py b/test/bases/renku_data_services/data_api/conftest.py index 5a79e35f4..c639733b4 100644 --- a/test/bases/renku_data_services/data_api/conftest.py +++ b/test/bases/renku_data_services/data_api/conftest.py @@ -251,7 +251,18 @@ async def create_resource_pool_helper(admin: bool = False, **payload) -> dict[st "default": True, "node_affinities": [], "tolerations": [], - } + }, + { + "cpu": 2.0, + "memory": 20, + "gpu": 0, + "name": "test-class-name", + "max_storage": 200, + "default_storage": 2, + "default": False, + "node_affinities": [], + "tolerations": 
[], + }, ], "quota": {"cpu": 100, "memory": 100, "gpu": 0}, "default": False, diff --git a/test/bases/renku_data_services/data_api/test_data_connectors.py b/test/bases/renku_data_services/data_api/test_data_connectors.py new file mode 100644 index 000000000..5c8a810ef --- /dev/null +++ b/test/bases/renku_data_services/data_api/test_data_connectors.py @@ -0,0 +1,1178 @@ +from typing import Any + +import pytest +from sanic_testing.testing import SanicASGITestClient + +from renku_data_services.users.models import UserInfo +from test.bases.renku_data_services.data_api.utils import merge_headers + + +@pytest.fixture +def create_data_connector(sanic_client: SanicASGITestClient, regular_user, user_headers): + async def create_data_connector_helper( + name: str, user: UserInfo | None = None, headers: dict[str, str] | None = None, **payload + ) -> dict[str, Any]: + user = user or regular_user + headers = headers or user_headers + dc_payload = { + "name": name, + "description": "A data connector", + "visibility": "private", + "namespace": f"{user.first_name}.{user.last_name}", + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + "region": "us-east-1", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + "keywords": ["keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"], + } + dc_payload.update(payload) + + _, response = await sanic_client.post("/api/data/data_connectors", headers=headers, json=dc_payload) + + assert response.status_code == 201, response.text + return response.json + + return create_data_connector_helper + + +@pytest.mark.asyncio +async def test_post_data_connector(sanic_client: SanicASGITestClient, regular_user, user_headers) -> None: + payload = { + "name": "My data connector", + "slug": "my-data-connector", + "description": "A data connector", + "visibility": "public", + "namespace": f"{regular_user.first_name}.{regular_user.last_name}", + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + "region": "us-east-1", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + "keywords": ["keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"], + } + + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 201, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("name") == "My data connector" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "my-data-connector" + assert data_connector.get("storage") is not None + storage = data_connector["storage"] + assert storage.get("storage_type") == "s3" + assert storage.get("source_path") == "bucket/my-folder" + assert storage.get("target_path") == "my/target" + assert storage.get("readonly") is True + assert data_connector.get("created_by") == "user" + assert data_connector.get("visibility") == "public" + assert data_connector.get("description") == "A data connector" + assert set(data_connector.get("keywords")) == {"keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"} + + # Check that we can retrieve the data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector["id"]}", headers=user_headers) + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json.get("id") == data_connector["id"] + + # Check that we can retrieve the data connector by slug + _, response = await sanic_client.get( + 
f"/api/data/namespaces/{data_connector["namespace"]}/data_connectors/{data_connector["slug"]}", + headers=user_headers, + ) + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json.get("id") == data_connector["id"] + + +@pytest.mark.asyncio +async def test_post_data_connector_with_s3_url(sanic_client: SanicASGITestClient, regular_user, user_headers) -> None: + payload = { + "name": "My data connector", + "slug": "my-data-connector", + "description": "A data connector", + "visibility": "public", + "namespace": f"{regular_user.first_name}.{regular_user.last_name}", + "storage": { + "storage_url": "s3://my-bucket", + "target_path": "my/target", + }, + "keywords": ["keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"], + } + + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 201, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("name") == "My data connector" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "my-data-connector" + assert data_connector.get("storage") is not None + storage = data_connector["storage"] + assert storage.get("storage_type") == "s3" + assert storage.get("source_path") == "my-bucket" + assert storage.get("target_path") == "my/target" + assert storage.get("readonly") is True + assert data_connector.get("created_by") == "user" + assert data_connector.get("visibility") == "public" + assert data_connector.get("description") == "A data connector" + assert set(data_connector.get("keywords")) == {"keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"} + + +@pytest.mark.asyncio +async def test_post_data_connector_with_azure_url( + sanic_client: SanicASGITestClient, regular_user, user_headers +) -> None: + payload = { + "name": "My data connector", + "slug": "my-data-connector", + "description": "A data connector", + "visibility": "public", + "namespace": f"{regular_user.first_name}.{regular_user.last_name}", + "storage": { + "storage_url": "azure://mycontainer/myfolder", + "target_path": "my/target", + }, + "keywords": ["keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"], + } + + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 201, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("name") == "My data connector" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "my-data-connector" + assert data_connector.get("storage") is not None + storage = data_connector["storage"] + assert storage.get("storage_type") == "azureblob" + assert storage.get("source_path") == "mycontainer/myfolder" + assert storage.get("target_path") == "my/target" + assert storage.get("readonly") is True + assert data_connector.get("created_by") == "user" + assert data_connector.get("visibility") == "public" + assert data_connector.get("description") == "A data connector" + assert set(data_connector.get("keywords")) == {"keyword 1", "keyword.2", "keyword-3", "KEYWORD_4"} + + +@pytest.mark.asyncio +async def test_post_data_connector_with_invalid_visibility(sanic_client: SanicASGITestClient, user_headers) -> None: + payload = {"visibility": "random"} + + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code 
== 422, response.text + assert "visibility: Input should be 'private' or 'public'" in response.json["error"]["message"] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("keyword", ["invalid chars '", "Nön English"]) +async def test_post_data_connector_with_invalid_keywords( + sanic_client: SanicASGITestClient, user_headers, keyword +) -> None: + payload = {"keywords": [keyword]} + + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 422, response.text + assert "String should match pattern '^[A-Za-z0-9\\s\\-_.]*$'" in response.json["error"]["message"] + + +@pytest.mark.asyncio +async def test_post_data_connector_with_invalid_namespace( + sanic_client: SanicASGITestClient, user_headers, member_1_user +) -> None: + namespace = f"{member_1_user.first_name}.{member_1_user.last_name}" + _, response = await sanic_client.get(f"/api/data/namespaces/{namespace}", headers=user_headers) + assert response.status_code == 200, response.text + + payload = { + "name": "My data connector", + "namespace": namespace, + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + "region": "us-east-1", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + } + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 403, response.text + assert "you do not have sufficient permissions" in response.json["error"]["message"] + + +@pytest.mark.asyncio +async def test_post_data_connector_with_conflicting_slug( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector_1 = await create_data_connector("Data connector 1") + + payload = { + "name": "My data connector", + "namespace": data_connector_1["namespace"], + "slug": data_connector_1["slug"], + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + } + _, response = await sanic_client.post("/api/data/data_connectors", headers=user_headers, json=payload) + + assert response.status_code == 409, response.text + + +@pytest.mark.asyncio +@pytest.mark.parametrize("headers_and_error", [("unauthorized_headers", 401), ("member_1_headers", 403)]) +async def test_post_data_connector_without_namespace_permission( + sanic_client: SanicASGITestClient, user_headers, headers_and_error, request +) -> None: + headers_name, status_code = headers_and_error + + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + + headers = request.getfixturevalue(headers_name) + payload = { + "name": "My data connector", + "namespace": "my-group", + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + } + _, response = await sanic_client.post("/api/data/data_connectors", headers=headers, json=payload) + + assert response.status_code == status_code, response.text + + +@pytest.mark.asyncio +async def test_post_data_connector_with_namespace_permission( + sanic_client: SanicASGITestClient, user_headers, member_1_headers, member_1_user +) -> None: + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + patch 
= [{"id": member_1_user.id, "role": "editor"}] + _, response = await sanic_client.patch("/api/data/groups/my-group/members", headers=user_headers, json=patch) + assert response.status_code == 200 + + payload = { + "name": "My data connector", + "namespace": "my-group", + "storage": { + "configuration": { + "type": "s3", + "provider": "AWS", + }, + "source_path": "bucket/my-folder", + "target_path": "my/target", + }, + } + _, response = await sanic_client.post("/api/data/data_connectors", headers=member_1_headers, json=payload) + + assert response.status_code == 201, response.text + + +@pytest.mark.asyncio +async def test_get_all_data_connectors_pagination( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + for i in range(1, 10): + await create_data_connector(f"Data connector {i}") + + parameters = {"page": 2, "per_page": 3} + _, response = await sanic_client.get("/api/data/data_connectors", headers=user_headers, params=parameters) + + assert response.status_code == 200, response.text + assert response.json is not None + data_connectors = response.json + assert {dc["name"] for dc in data_connectors} == { + "Data connector 4", + "Data connector 5", + "Data connector 6", + } + assert response.headers["page"] == "2" + assert response.headers["per-page"] == "3" + assert response.headers["total"] == "9" + assert response.headers["total-pages"] == "3" + + +@pytest.mark.asyncio +async def test_get_one_data_connector(sanic_client: SanicASGITestClient, create_data_connector, user_headers) -> None: + data_connector = await create_data_connector("A new data connector") + data_connector_id = data_connector["id"] + + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + + assert response.status_code == 200, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("id") == data_connector_id + assert data_connector.get("name") == "A new data connector" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "a-new-data-connector" + + +@pytest.mark.asyncio +async def test_get_one_by_slug_data_connector( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("A new data connector") + namespace = data_connector["namespace"] + slug = data_connector["slug"] + + _, response = await sanic_client.get( + f"/api/data/namespaces/{namespace}/data_connectors/{slug}", headers=user_headers + ) + + assert response.status_code == 200, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("id") == data_connector["id"] + assert data_connector.get("name") == "A new data connector" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "a-new-data-connector" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("headers_name", ["unauthorized_headers", "member_1_headers"]) +async def test_get_one_data_connector_unauthorized( + sanic_client: SanicASGITestClient, create_data_connector, headers_name, request +) -> None: + data_connector = await create_data_connector("A new data connector") + data_connector_id = data_connector["id"] + + headers = request.getfixturevalue(headers_name) + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=headers) + + assert response.status_code == 404, response.text + + +@pytest.mark.asyncio +async def 
test_patch_data_connector(sanic_client: SanicASGITestClient, create_data_connector, user_headers) -> None: + data_connector = await create_data_connector("My data connector") + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = { + "name": "New Name", + "description": "Updated data connector", + "keywords": ["keyword 1", "keyword 2"], + "visibility": "public", + "storage": { + "configuration": {"type": "azureblob"}, + "source_path": "new/src", + "target_path": "new/target", + "readonly": False, + }, + } + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + + assert response.status_code == 200, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("name") == "New Name" + assert data_connector.get("namespace") == "user.doe" + assert data_connector.get("slug") == "my-data-connector" + assert data_connector.get("storage") is not None + storage = data_connector["storage"] + assert storage.get("storage_type") == "azureblob" + assert storage.get("source_path") == "new/src" + assert storage.get("target_path") == "new/target" + assert storage.get("readonly") is False + assert data_connector.get("created_by") == "user" + assert data_connector.get("visibility") == "public" + assert data_connector.get("description") == "Updated data connector" + assert set(data_connector.get("keywords")) == {"keyword 1", "keyword 2"} + + +@pytest.mark.asyncio +async def test_patch_data_connector_can_unset_storage_field( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + initial_storage = { + "configuration": { + "provider": "AWS", + "type": "s3", + "region": "us-east-1", + "access_key_id": "ACCESS KEY", + "secret_access_key": "SECRET", + }, + "source_path": "my-bucket", + "target_path": "my_data", + } + data_connector = await create_data_connector("My data connector", storage=initial_storage) + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + data_connector_id = data_connector["id"] + patch = {"storage": {"configuration": {"region": None, "access_key_id": None, "secret_access_key": None}}} + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + + assert response.status_code == 200, response.text + assert response.json is not None + new_configuration = response.json["storage"]["configuration"] + assert new_configuration is not None + assert new_configuration["provider"] == "AWS" + assert new_configuration["type"] == "s3" + assert "region" not in new_configuration + assert "access_key_id" not in new_configuration + assert "secret_access_key" not in new_configuration + assert len(response.json["storage"]["sensitive_fields"]) == 0 + + +@pytest.mark.asyncio +async def test_patch_data_connector_visibility_to_private_hides_data_connector( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector", visibility="public") + + _, response = await sanic_client.get("/api/data/data_connectors") + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json[0]["name"] == "My data connector" + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = { + "visibility": "private", + } + data_connector_id = data_connector["id"] 
+ _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + assert response.status_code == 200, response.text + + _, response = await sanic_client.get("/api/data/data_connectors") + + assert len(response.json) == 0 + + +@pytest.mark.asyncio +async def test_patch_data_connector_visibility_to_public_shows_data_connector( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector", visibility="private") + + _, response = await sanic_client.get("/api/data/data_connectors") + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = { + "visibility": "public", + } + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + assert response.status_code == 200, response.text + + _, response = await sanic_client.get("/api/data/data_connectors") + + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json[0]["name"] == "My data connector" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("field", ["id", "created_by", "creation_date"]) +async def test_patch_data_connector_reserved_fields_are_forbidden( + sanic_client: SanicASGITestClient, create_data_connector, user_headers, field +) -> None: + data_connector = await create_data_connector("My data connector") + original_value = data_connector[field] + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = { + field: "new-value", + } + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + + assert response.status_code == 422, response.text + assert f"{field}: Extra inputs are not permitted" in response.text + + # Check that the field's value didn't change + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + assert response.status_code == 200, response.text + data_connector = response.json + assert data_connector[field] == original_value + + +@pytest.mark.asyncio +async def test_patch_data_connector_without_if_match_header( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector") + original_value = data_connector["name"] + + patch = { + "name": "New Name", + } + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=user_headers, json=patch + ) + + assert response.status_code == 428, response.text + assert "If-Match header not provided" in response.text + + # Check that the field's value didn't change + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + assert response.status_code == 200, response.text + data_connector = response.json + assert data_connector["name"] == original_value + + +@pytest.mark.asyncio +async def test_patch_data_connector_namespace( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My 
Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + data_connector = await create_data_connector("My data connector") + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = {"namespace": "my-group"} + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + + assert response.status_code == 200, response.text + assert response.json is not None + data_connector = response.json + assert data_connector.get("id") == data_connector_id + assert data_connector.get("name") == "My data connector" + assert data_connector.get("namespace") == "my-group" + assert data_connector.get("slug") == "my-data-connector" + + # Check that we can retrieve the data connector by slug + _, response = await sanic_client.get( + f"/api/data/namespaces/{data_connector["namespace"]}/data_connectors/{data_connector["slug"]}", + headers=user_headers, + ) + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json.get("id") == data_connector["id"] + + +@pytest.mark.asyncio +async def test_patch_data_connector_with_invalid_namespace( + sanic_client: SanicASGITestClient, create_data_connector, user_headers, member_1_user +) -> None: + namespace = f"{member_1_user.first_name}.{member_1_user.last_name}" + _, response = await sanic_client.get(f"/api/data/namespaces/{namespace}", headers=user_headers) + assert response.status_code == 200, response.text + data_connector = await create_data_connector("My data connector") + + headers = merge_headers(user_headers, {"If-Match": data_connector["etag"]}) + patch = { + "namespace": namespace, + } + data_connector_id = data_connector["id"] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}", headers=headers, json=patch + ) + + assert response.status_code == 403, response.text + assert "you do not have sufficient permissions" in response.json["error"]["message"] + + +@pytest.mark.asyncio +async def test_delete_data_connector(sanic_client: SanicASGITestClient, create_data_connector, user_headers) -> None: + await create_data_connector("Data connector 1") + data_connector = await create_data_connector("Data connector 2") + await create_data_connector("Data connector 3") + + data_connector_id = data_connector["id"] + _, response = await sanic_client.delete(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + + assert response.status_code == 204, response.text + + _, response = await sanic_client.get("/api/data/data_connectors", headers=user_headers) + + assert response.status_code == 200, response.text + assert {dc["name"] for dc in response.json} == {"Data connector 1", "Data connector 3"} + + +@pytest.mark.asyncio +async def test_get_data_connector_project_links_empty( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("Data connector 1") + + data_connector_id = data_connector["id"] + _, response = await sanic_client.get( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers + ) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + +@pytest.mark.asyncio +async def test_post_data_connector_project_link( + sanic_client: SanicASGITestClient, create_data_connector, create_project, user_headers +) -> None: + data_connector = 
await create_data_connector("Data connector 1") + project = await create_project("Project A") + + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + + assert response.status_code == 201, response.text + assert response.json is not None + link = response.json + assert link.get("data_connector_id") == data_connector_id + assert link.get("project_id") == project_id + assert link.get("created_by") == "user" + + # Check that the links list from the data connector is not empty now + _, response = await sanic_client.get( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers + ) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 1 + assert response.json[0].get("id") == link["id"] + assert response.json[0].get("data_connector_id") == data_connector_id + assert response.json[0].get("project_id") == project_id + + # Check that the links list to the project is not empty now + _, response = await sanic_client.get(f"/api/data/projects/{project_id}/data_connector_links", headers=user_headers) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 1 + assert response.json[0].get("id") == link["id"] + assert response.json[0].get("data_connector_id") == data_connector_id + assert response.json[0].get("project_id") == project_id + + +@pytest.mark.asyncio +async def test_post_data_connector_project_link_already_exists( + sanic_client: SanicASGITestClient, create_data_connector, create_project, user_headers +) -> None: + data_connector = await create_data_connector("Data connector 1") + project = await create_project("Project A") + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 409, response.text + + +@pytest.mark.asyncio +async def test_post_data_connector_project_link_unauthorized_if_not_project_editor( + sanic_client: SanicASGITestClient, + create_data_connector, + create_project, + user_headers, + member_1_headers, + member_1_user, +) -> None: + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + patch = [{"id": member_1_user.id, "role": "owner"}] + _, response = await sanic_client.patch("/api/data/groups/my-group/members", headers=user_headers, json=patch) + assert response.status_code == 200 + data_connector = await create_data_connector("Data connector 1", namespace="my-group") + data_connector_id = data_connector["id"] + project = await create_project("Project A") + project_id = project["id"] + patch = [{"id": member_1_user.id, "role": "viewer"}] + _, response = await sanic_client.patch(f"/api/data/projects/{project_id}/members", headers=user_headers, json=patch) + assert response.status_code == 200, response.text + + # Check that "member_1" can view the project and data 
connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=member_1_headers, json=payload + ) + + assert response.status_code == 404, response.text + + +@pytest.mark.asyncio +async def test_post_data_connector_project_link_unauthorized_if_not_data_connector_editor( + sanic_client: SanicASGITestClient, + create_data_connector, + create_project, + user_headers, + member_1_headers, + member_1_user, +) -> None: + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + patch = [{"id": member_1_user.id, "role": "viewer"}] + _, response = await sanic_client.patch("/api/data/groups/my-group/members", headers=user_headers, json=patch) + assert response.status_code == 200 + data_connector = await create_data_connector("Data connector 1", namespace="my-group") + data_connector_id = data_connector["id"] + project = await create_project("Project A") + project_id = project["id"] + patch = [{"id": member_1_user.id, "role": "owner"}] + _, response = await sanic_client.patch(f"/api/data/projects/{project_id}/members", headers=user_headers, json=patch) + assert response.status_code == 200, response.text + + # Check that "member_1" can view the project and data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=member_1_headers, json=payload + ) + + assert response.status_code == 404, response.text + + +@pytest.mark.asyncio +async def test_post_data_connector_project_link_public_data_connector( + sanic_client: SanicASGITestClient, + create_data_connector, + create_project, + user_headers, + member_1_headers, + member_1_user, +) -> None: + data_connector = await create_data_connector( + "Data connector 1", user=member_1_user, headers=member_1_headers, visibility="public" + ) + data_connector_id = data_connector["id"] + project = await create_project("Project A") + project_id = project["id"] + + # Check that "regular_user" can view the project and data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + assert response.status_code == 200, response.text + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=user_headers) + assert response.status_code == 200, response.text + + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + + assert response.status_code == 201, response.text + assert response.json is not None + link = response.json + assert link.get("data_connector_id") == data_connector_id + assert 
link.get("project_id") == project_id + assert link.get("created_by") == "user" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("project_role", ["viewer", "editor", "owner"]) +async def test_post_data_connector_project_link_extends_read_access( + sanic_client: SanicASGITestClient, + create_data_connector, + create_project, + user_headers, + member_1_headers, + member_1_user, + project_role, +) -> None: + data_connector = await create_data_connector("Data connector 1") + data_connector_id = data_connector["id"] + project = await create_project("Project A") + project_id = project["id"] + patch = [{"id": member_1_user.id, "role": project_role}] + _, response = await sanic_client.patch(f"/api/data/projects/{project_id}/members", headers=user_headers, json=patch) + assert response.status_code == 200, response.text + + # Check that "member_1" can view the project + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + # Check that "member_1" cannot view the data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 404, response.text + + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + + # Check that "member_1" can now view the data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + assert response.json is not None + assert response.json.get("id") == data_connector_id + assert response.json.get("name") == "Data connector 1" + assert response.json.get("namespace") == "user.doe" + assert response.json.get("slug") == "data-connector-1" + + +@pytest.mark.asyncio +@pytest.mark.parametrize("group_role", ["viewer", "editor", "owner"]) +async def test_post_data_connector_project_link_does_not_extend_access_to_parent_group_members( + sanic_client: SanicASGITestClient, + create_data_connector, + user_headers, + member_1_headers, + member_1_user, + group_role, +) -> None: + data_connector = await create_data_connector("Data connector 1") + data_connector_id = data_connector["id"] + _, response = await sanic_client.post( + "/api/data/groups", headers=user_headers, json={"name": "My Group", "slug": "my-group"} + ) + assert response.status_code == 201, response.text + patch = [{"id": member_1_user.id, "role": group_role}] + _, response = await sanic_client.patch("/api/data/groups/my-group/members", headers=user_headers, json=patch) + assert response.status_code == 200 + payload = {"name": "Project A", "namespace": "my-group"} + _, response = await sanic_client.post("/api/data/projects", headers=user_headers, json=payload) + assert response.status_code == 201 + project = response.json + project_id = project["id"] + + # Check that "member_1" can view the project + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=member_1_headers) + assert response.status_code == 200, response.text + # Check that "member_1" cannot view the data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 404, 
response.text + + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + + # Check that "member_1" can still not view the data connector + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=member_1_headers) + assert response.status_code == 404, response.text + + +@pytest.mark.asyncio +async def test_delete_data_connector_project_link( + sanic_client: SanicASGITestClient, create_data_connector, create_project, user_headers +) -> None: + data_connector = await create_data_connector("Data connector 1") + project = await create_project("Project A") + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + link = response.json + + _, response = await sanic_client.delete( + f"/api/data/data_connectors/{data_connector_id}/project_links/{link["id"]}", headers=user_headers + ) + + assert response.status_code == 204, response.text + + # Check that the links list from the data connector is empty now + _, response = await sanic_client.get( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers + ) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + # Check that the links list to the project is empty now + _, response = await sanic_client.get(f"/api/data/projects/{project_id}/data_connector_links", headers=user_headers) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + +@pytest.mark.asyncio +async def test_delete_data_connector_after_linking( + sanic_client: SanicASGITestClient, create_data_connector, create_project, user_headers +) -> None: + data_connector = await create_data_connector("Data connector 1") + project = await create_project("Project A") + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + + _, response = await sanic_client.delete(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + + assert response.status_code == 204, response.text + + # Check that the project still exists + _, response = await sanic_client.get(f"/api/data/projects/{project_id}", headers=user_headers) + assert response.status_code == 200, response.text + + # Check that the links list to the project is empty now + _, response = await sanic_client.get(f"/api/data/projects/{project_id}/data_connector_links", headers=user_headers) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + +@pytest.mark.asyncio +async def test_delete_project_after_linking( + sanic_client: SanicASGITestClient, create_data_connector, create_project, user_headers +) -> None: + data_connector = await create_data_connector("Data connector 1") + project = await create_project("Project 
A") + data_connector_id = data_connector["id"] + project_id = project["id"] + payload = {"project_id": project_id} + _, response = await sanic_client.post( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers, json=payload + ) + assert response.status_code == 201, response.text + + _, response = await sanic_client.delete(f"/api/data/projects/{project_id}", headers=user_headers) + + assert response.status_code == 204, response.text + + # Check that the data connector still exists + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}", headers=user_headers) + + assert response.status_code == 200, response.text + + # Check that the links list from the data connector is empty now + _, response = await sanic_client.get( + f"/api/data/data_connectors/{data_connector_id}/project_links", headers=user_headers + ) + + assert response.status_code == 200, response.text + assert response.json is not None + assert len(response.json) == 0 + + +@pytest.mark.asyncio +async def test_patch_data_connector_secrets( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector") + data_connector_id = data_connector["id"] + + payload = [ + {"name": "access_key_id", "value": "access key id value"}, + {"name": "secret_access_key", "value": "secret access key value"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + + # Check that the secrets are returned from a GET request + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + + +@pytest.mark.asyncio +async def test_patch_data_connector_secrets_update_secrets( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector") + data_connector_id = data_connector["id"] + payload = [ + {"name": "access_key_id", "value": "access key id value"}, + {"name": "secret_access_key", "value": "secret access key value"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + secret_ids = {s["secret_id"] for s in secrets} + + payload = [ + {"name": "access_key_id", "value": "new access key id value"}, + {"name": "secret_access_key", "value": "new secret access key value"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + 
assert {s["secret_id"] for s in secrets} == secret_ids + + # Check that the secrets are returned from a GET request + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + assert {s["secret_id"] for s in secrets} == secret_ids + + +@pytest.mark.asyncio +async def test_patch_data_connector_secrets_add_and_remove_secrets( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector") + data_connector_id = data_connector["id"] + payload = [ + {"name": "access_key_id", "value": "access key id value"}, + {"name": "secret_access_key", "value": "secret access key value"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "secret_access_key"} + access_key_id_secret_id = next(filter(lambda s: s["name"] == "access_key_id", secrets), None) + + payload = [ + {"name": "access_key_id", "value": "new access key id value"}, + {"name": "secret_access_key", "value": None}, + {"name": "password", "value": "password"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "password"} + new_access_key_id_secret_id = next(filter(lambda s: s["name"] == "access_key_id", secrets), None) + assert new_access_key_id_secret_id == access_key_id_secret_id + + # Check that the secrets are returned from a GET request + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "password"} + + # Check the associated secrets + _, response = await sanic_client.get("/api/data/user/secrets", params={"kind": "storage"}, headers=user_headers) + + assert response.status_code == 200 + assert response.json is not None + assert len(response.json) == 2 + assert {s["name"] for s in secrets} == {"access_key_id", "password"} + + +@pytest.mark.asyncio +async def test_delete_data_connector_secrets( + sanic_client: SanicASGITestClient, create_data_connector, user_headers +) -> None: + data_connector = await create_data_connector("My data connector") + data_connector_id = data_connector["id"] + payload = [ + {"name": "access_key_id", "value": "access key id value"}, + {"name": "secret_access_key", "value": "secret access key value"}, + ] + _, response = await sanic_client.patch( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers, json=payload + ) + assert response.status_code == 200, response.json + assert response.json is not None + secrets = response.json + assert len(secrets) == 2 + assert {s["name"] for s in 
secrets} == {"access_key_id", "secret_access_key"} + + _, response = await sanic_client.delete( + f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers + ) + + assert response.status_code == 204, response.json + + # Check that the secrets list is empty from the GET request + _, response = await sanic_client.get(f"/api/data/data_connectors/{data_connector_id}/secrets", headers=user_headers) + assert response.status_code == 200, response.json + assert response.json == [], response.json + + # Check that the associated secrets are deleted + _, response = await sanic_client.get("/api/data/user/secrets", params={"kind": "storage"}, headers=user_headers) + + assert response.status_code == 200 + assert response.json == [], response.json diff --git a/test/bases/renku_data_services/data_api/test_migrations.py b/test/bases/renku_data_services/data_api/test_migrations.py index 2f78563d5..e1b4e3274 100644 --- a/test/bases/renku_data_services/data_api/test_migrations.py +++ b/test/bases/renku_data_services/data_api/test_migrations.py @@ -1,9 +1,12 @@ import base64 +from datetime import UTC, datetime from typing import Any import pytest +import sqlalchemy as sa from alembic.script import ScriptDirectory from sanic_testing.testing import SanicASGITestClient +from ulid import ULID from renku_data_services.app_config.config import Config from renku_data_services.message_queue.avro_models.io.renku.events import v2 @@ -94,3 +97,39 @@ async def test_migration_to_f34b87ddd954( ] assert len(group_removed_events) == 2 assert set(added_group_ids) == {e.id for e in group_removed_events} + + +@pytest.mark.asyncio +async def test_migration_to_584598f3b769(app_config: Config) -> None: + run_migrations_for_app("common", "dcc1c1ee662f") + await app_config.kc_user_repo.initialize(app_config.kc_api) + await app_config.group_repo.generate_user_namespaces() + env_id = str(ULID()) + async with app_config.db.async_session_maker() as session, session.begin(): + await session.execute( + sa.text( + "INSERT INTO " + "sessions.environments(id, name, created_by_id, creation_date, container_image, default_url) " + "VALUES (:id, :name, :created_by, :date, :image, :url)" + ).bindparams( + id=env_id, + name="test", + created_by="test", + date=datetime.now(UTC), + image="test", + url="/test", + ) + ) + run_migrations_for_app("common", "584598f3b769") + async with app_config.db.async_session_maker() as session, session.begin(): + res = await session.execute(sa.text("SELECT * FROM sessions.environments")) + data = res.all() + assert len(data) == 1 + env = data[0]._mapping + assert env["id"] == env_id + assert env["name"] == "test" + assert env["container_image"] == "test" + assert env["default_url"] == "/test" + assert env["port"] == 8888 + assert env["uid"] == 1000 + assert env["gid"] == 1000 diff --git a/test/bases/renku_data_services/data_api/test_notebooks.py b/test/bases/renku_data_services/data_api/test_notebooks.py new file mode 100644 index 000000000..e9e973a8a --- /dev/null +++ b/test/bases/renku_data_services/data_api/test_notebooks.py @@ -0,0 +1,326 @@ +"""Tests for notebook blueprints.""" + +import asyncio +import os +import shutil +from collections.abc import AsyncIterator +from unittest.mock import MagicMock +from uuid import uuid4 + +import pytest +import pytest_asyncio +from kr8s.asyncio.objects import Pod +from sanic_testing.testing import SanicASGITestClient + +from renku_data_services.notebooks.api.classes.k8s_client import JupyterServerV1Alpha1Kr8s + +from .utils import K3DCluster, 
setup_amalthea + +os.environ["KUBECONFIG"] = ".k3d-config.yaml" + + +@pytest.fixture(scope="module", autouse=True) +def cluster() -> K3DCluster: + if shutil.which("k3d") is None: + pytest.skip("Requires k3d for cluster creation") + + with K3DCluster("renku-test-notebooks") as cluster: + setup_amalthea("amalthea-js", "amalthea", "0.12.2", cluster) + + yield cluster + + +@pytest.fixture +def non_mocked_hosts() -> list: + """Hosts that should not get mocked during tests.""" + + return ["127.0.0.1"] + + +@pytest.fixture +def renku_image() -> str: + return "renku/renkulab-py:3.10-0.24.0" + + +@pytest.fixture +def unknown_server_name() -> str: + return "unknown" + + +@pytest.fixture +def server_name() -> str: + random_name_part = str(uuid4()) + session_name = f"test-session-{random_name_part}" + return session_name + + +@pytest.fixture +def pod_name(server_name: str) -> str: + return f"{server_name}-0" + + +@pytest_asyncio.fixture +async def jupyter_server(renku_image: str, server_name: str, pod_name: str) -> AsyncIterator[JupyterServerV1Alpha1Kr8s]: + """Fake server providing the minimal set of objects for tests.""" + + server = await JupyterServerV1Alpha1Kr8s( + { + "metadata": {"name": server_name, "labels": {"renku.io/safe-username": "user"}}, + "spec": {"jupyterServer": {"image": renku_image}, "routing": {"host": "localhost"}, "auth": {"token": ""}}, + } + ) + + await server.create() + pod = await Pod(dict(metadata=dict(name=pod_name))) + max_retries = 200 + sleep_seconds = 0.2 + retries = 0 + while True: + retries += 1 + pod_exists = await pod.exists() + if pod_exists: + break + if retries > max_retries: + raise ValueError( + f"The pod {pod_name} for the session {server_name} could not be found even after {max_retries} " + f"retries with {sleep_seconds} seconds of sleep after each retry." + ) + await asyncio.sleep(sleep_seconds) + await pod.refresh() + await pod.wait("condition=Ready") + yield server + await server.delete("Foreground") + + +@pytest_asyncio.fixture() +async def practice_jupyter_server(renku_image: str, server_name: str) -> AsyncIterator[JupyterServerV1Alpha1Kr8s]: + """Fake server for non-pod-related tests.""" + + server = await JupyterServerV1Alpha1Kr8s( + { + "metadata": { + "name": server_name, + "labels": {"renku.io/safe-username": "user"}, + "annotations": { + "renku.io/branch": "dummy", + "renku.io/commit-sha": "sha", + "renku.io/default_image_used": "default/image", + "renku.io/namespace": "default", + "renku.io/projectName": "dummy", + "renku.io/repository": "dummy", + }, + }, + "spec": {"jupyterServer": {"image": renku_image}}, + } + ) + + await server.create() + yield server + await server.delete("Foreground") + + +@pytest.fixture() +def authenticated_user_headers(user_headers): + return dict({"Renku-Auth-Refresh-Token": "test-refresh-token"}, **user_headers) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("image,expected_status_code", [("python:3.12", 200), ("shouldnotexist:0.42", 404)]) +async def test_check_docker_image(sanic_client: SanicASGITestClient, user_headers, image, expected_status_code): + """Validate that the images endpoint answers correctly.
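+ The parametrized cases cover an image that exists ("python:3.12") and one that does not.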
+ + Needs the responses package in case docker queries must be mocked + """ + + _, res = await sanic_client.get(f"/api/data/notebooks/images/?image_url={image}", headers=user_headers) + + assert res.status_code == expected_status_code, res.text + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "server_name_fixture,expected_status_code", [("unknown_server_name", 404), ("server_name", 200)] +) +async def test_log_retrieval( + sanic_client: SanicASGITestClient, + request, + server_name_fixture, + expected_status_code, + jupyter_server, + authenticated_user_headers, +): + """Validate that the logs endpoint answers correctly.""" + + server_name = request.getfixturevalue(server_name_fixture) + + _, res = await sanic_client.get(f"/api/data/notebooks/logs/{server_name}", headers=authenticated_user_headers) + + assert res.status_code == expected_status_code, res.text + + +@pytest.mark.asyncio +async def test_server_options(sanic_client: SanicASGITestClient, user_headers): + _, res = await sanic_client.get("/api/data/notebooks/server_options", headers=user_headers) + + assert res.status_code == 200, res.text + assert res.json == { + "cloudstorage": {"enabled": False}, + "defaultUrl": { + "default": "/lab", + "displayName": "Default Environment", + "options": ["/lab"], + "order": 1, + "type": "enum", + }, + "lfs_auto_fetch": { + "default": False, + "displayName": "Automatically fetch LFS data", + "order": 6, + "type": "boolean", + }, + } + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "server_name_fixture,expected_status_code", [("unknown_server_name", 204), ("server_name", 204)] +) +async def test_stop_server( + sanic_client: SanicASGITestClient, + request, + server_name_fixture, + expected_status_code, + practice_jupyter_server, + authenticated_user_headers, +): + server_name = request.getfixturevalue(server_name_fixture) + + _, res = await sanic_client.delete(f"/api/data/notebooks/servers/{server_name}", headers=authenticated_user_headers) + + assert res.status_code == expected_status_code, res.text + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "server_name_fixture,expected_status_code, patch", + [("unknown_server_name", 404, {}), ("server_name", 200, {"state": "hibernated"})], +) +async def test_patch_server( + sanic_client: SanicASGITestClient, + request, + server_name_fixture, + expected_status_code, + patch, + practice_jupyter_server, + authenticated_user_headers, +): + server_name = request.getfixturevalue(server_name_fixture) + + _, res = await sanic_client.patch( + f"/api/data/notebooks/servers/{server_name}", json=patch, headers=authenticated_user_headers + ) + + assert res.status_code == expected_status_code, res.text + + +class AttributeDictionary(dict): + """Enables accessing dictionary keys as attributes.""" + + def __init__(self, dictionary): + for key, value in dictionary.items(): + # TODO check if key is a valid identifier + if key == "list": + raise ValueError("'list' is not allowed as a key") + if isinstance(value, dict): + value = AttributeDictionary(value) + elif isinstance(value, list): + value = [AttributeDictionary(v) if isinstance(v, dict) else v for v in value] + self.__setattr__(key, value) + self[key] = value + + def list(self): + return [value for _, value in self.items()] + + def __setitem__(self, k, v): + if k == "list": + raise ValueError("'list' is not allowed as a key") + self.__setattr__(k, v) + return super().__setitem__(k, v) + + +@pytest.fixture +def fake_gitlab_projects(): + class GitLabProject(AttributeDictionary): + def __init__(self):
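+ # Start from an empty mapping; get() below serves a canned fake project for any name that is not stored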
super().__init__({}) + + def get(self, name, default=None): + if name not in self: + return AttributeDictionary( + { + "path": "my-test", + "path_with_namespace": "test-namespace/my-test", + "branches": {"main": AttributeDictionary({})}, + "commits": {"ee4b1c9fedc99abe5892ee95320bbd8471c5985b": AttributeDictionary({})}, + "id": 5407, + "http_url_to_repo": "https://gitlab-url.com/test-namespace/my-test.git", + "web_url": "https://gitlab-url.com/test-namespace/my-test", + } + ) + return super().get(name, default) + + return GitLabProject() + + +@pytest.fixture() +def fake_gitlab(mocker, fake_gitlab_projects): + gitlab = mocker.patch("renku_data_services.notebooks.api.classes.user.Gitlab") + gitlab_mock = MagicMock() + gitlab_mock.auth = MagicMock() + gitlab_mock.projects = fake_gitlab_projects + gitlab_mock.user = AttributeDictionary( + {"username": "john.doe", "name": "John Doe", "email": "john.doe@notebooks-tests.renku.ch"} + ) + gitlab_mock.url = "https://gitlab-url.com" + gitlab.return_value = gitlab_mock + return gitlab + + +@pytest.mark.asyncio +async def test_old_start_server(sanic_client: SanicASGITestClient, authenticated_user_headers, fake_gitlab): + data = { + "branch": "main", + "commit_sha": "ee4b1c9fedc99abe5892ee95320bbd8471c5985b", + "namespace": "test-namespace", + "project": "my-test", + "image": "alpine:3", + } + + _, res = await sanic_client.post("/api/data/notebooks/old/servers/", json=data, headers=authenticated_user_headers) + + assert res.status_code == 201, res.text + + server_name = res.json["name"] + _, res = await sanic_client.delete(f"/api/data/notebooks/servers/{server_name}", headers=authenticated_user_headers) + + assert res.status_code == 204, res.text + + +@pytest.mark.asyncio +async def test_start_server(sanic_client: SanicASGITestClient, authenticated_user_headers, fake_gitlab): + data = { + "branch": "main", + "commit_sha": "ee4b1c9fedc99abe5892ee95320bbd8471c5985b", + "project_id": "test-namespace/my-test", + "launcher_id": "test_launcher", + "image": "alpine:3", + } + + _, res = await sanic_client.post("/api/data/notebooks/servers/", json=data, headers=authenticated_user_headers) + + assert res.status_code == 201, res.text + + server_name = res.json["name"] + _, res = await sanic_client.delete(f"/api/data/notebooks/servers/{server_name}", headers=authenticated_user_headers) + + assert res.status_code == 204, res.text diff --git a/test/bases/renku_data_services/data_api/test_resource_pools.py b/test/bases/renku_data_services/data_api/test_resource_pools.py index 9d5da38fa..b42ad7085 100644 --- a/test/bases/renku_data_services/data_api/test_resource_pools.py +++ b/test/bases/renku_data_services/data_api/test_resource_pools.py @@ -220,7 +220,7 @@ async def test_put_resource_class( ) -> None: _, res = await create_rp(valid_resource_pool_payload, sanic_client) assert res.status_code == 201 - assert len(res.json.get("classes", [])) == 1 + assert len(res.json.get("classes", [])) == 2 res_cls_payload = {**res.json.get("classes", [])[0], "cpu": 5.0} res_cls_expected_response = {**res.json.get("classes", [])[0], "cpu": 5.0} res_cls_payload.pop("id", None) @@ -592,7 +592,7 @@ async def test_patch_tolerations( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("tolerations", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["tolerations"]) == 1 # Patch in a 2nd toleration @@ -632,7 +632,7 @@ async def test_patch_affinities( rp = res.json rp_id = rp["id"] 
assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("node_affinities", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["node_affinities"]) == 1 assert res_class["node_affinities"][0] == {"key": "affinity1", "required_during_scheduling": False} @@ -686,7 +686,7 @@ async def test_remove_all_tolerations_put( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("tolerations", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["tolerations"]) == 1 assert res_class["tolerations"][0] == "toleration1" @@ -718,7 +718,7 @@ async def test_remove_all_affinities_put( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("node_affinities", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["node_affinities"]) == 1 assert res_class["node_affinities"][0] == {"key": "affinity1", "required_during_scheduling": False} @@ -750,7 +750,7 @@ async def test_put_tolerations( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("tolerations", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["tolerations"]) == 1 assert res_class["tolerations"][0] == "toleration1" @@ -782,7 +782,7 @@ async def test_put_affinities( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("node_affinities", [])) > 0][0] res_class_id = res_class["id"] assert len(res_class["node_affinities"]) == 1 assert res_class["node_affinities"][0] == {"key": "affinity1", "required_during_scheduling": False} @@ -820,7 +820,7 @@ async def test_get_all_tolerations( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("tolerations", [])) > 0][0] res_class_id = res_class["id"] _, res = await sanic_client.get( f"/api/data/resource_pools/{rp_id}/classes/{res_class_id}/tolerations", @@ -840,7 +840,7 @@ async def test_get_all_affinities( rp = res.json rp_id = rp["id"] assert len(rp["classes"]) > 0 - res_class = rp["classes"][0] + res_class = [i for i in rp["classes"] if len(i.get("node_affinities", [])) > 0][0] res_class_id = res_class["id"] _, res = await sanic_client.get( f"/api/data/resource_pools/{rp_id}/classes/{res_class_id}/node_affinities", diff --git a/test/bases/renku_data_services/data_api/test_schemathesis.py b/test/bases/renku_data_services/data_api/test_schemathesis.py index b5fa72c6a..b24600d59 100644 --- a/test/bases/renku_data_services/data_api/test_schemathesis.py +++ b/test/bases/renku_data_services/data_api/test_schemathesis.py @@ -39,9 +39,9 @@ async def apispec(sanic_client: SanicASGITestClient) -> BaseOpenAPISchema: # Same issue as for "security" for the "If-Match" header. # We skip header values which cannot be encoded as ascii. 
@schemathesis.hook -def filter_headers(context: HookContext, headers: dict[str, str]) -> bool: +def filter_headers(context: HookContext, headers: dict[str, str] | None) -> bool: op = context.operation - if op.method.upper() == "PATCH" and (op.path == "/projects/{project_id}" or op.path == "/platform/config"): + if headers is not None and op.method.upper() == "PATCH": if_match = headers.get("If-Match") if if_match and isinstance(if_match, str): try: @@ -73,6 +73,12 @@ def filter_query(context: HookContext, query: dict[str, str] | None) -> bool: ("/oauth2/providers", "POST"), ] +# TODO: RE-enable schemathesis when CI setup for notebooks / sessions is ready +EXCLUDE_PATH_PREFIXES = [ + "/sessions", + "/notebooks", +] + @pytest.mark.schemathesis @pytest.mark.asyncio @@ -84,6 +90,9 @@ async def test_api_schemathesis( admin_headers: dict, requests_statistics: list[timedelta], ) -> None: + for exclude_prefix in EXCLUDE_PATH_PREFIXES: + if case.path.startswith(exclude_prefix): + return req_kwargs = case.as_requests_kwargs(headers=admin_headers) _, res = await sanic_client.request(**req_kwargs) res.request.uri = str(res.url) diff --git a/test/bases/renku_data_services/data_api/test_sessions.py b/test/bases/renku_data_services/data_api/test_sessions.py index 825e190a1..732e5001a 100644 --- a/test/bases/renku_data_services/data_api/test_sessions.py +++ b/test/bases/renku_data_services/data_api/test_sessions.py @@ -1,9 +1,32 @@ """Tests for sessions blueprints.""" +import os +import shutil +from asyncio import AbstractEventLoop from typing import Any import pytest -from sanic_testing.testing import SanicASGITestClient +from pytest import FixtureRequest +from sanic_testing.testing import SanicASGITestClient, TestingResponse + +from renku_data_services.app_config.config import Config +from renku_data_services.crc.apispec import ResourcePool +from renku_data_services.users.models import UserInfo + +from .utils import K3DCluster, setup_amalthea + +os.environ["KUBECONFIG"] = ".k3d-config.yaml" + + +@pytest.fixture(scope="module", autouse=True) +def cluster() -> K3DCluster: + if shutil.which("k3d") is None: + pytest.skip("Requires k3d for cluster creation") + + with K3DCluster("renku-test-session") as cluster: + setup_amalthea("amalthea-se", "amalthea-sessions", "0.0.10-new-operator-chart", cluster) + + yield cluster @pytest.fixture @@ -29,10 +52,12 @@ async def create_session_launcher_helper(name: str, project_id: str, **payload) payload = payload.copy() payload.update({"name": name, "project_id": project_id}) payload["description"] = payload.get("description") or "A session launcher." 
- payload["environment_kind"] = payload.get("environment_kind") or "container_image" - - if payload["environment_kind"] == "container_image": - payload["container_image"] = payload.get("container_image") or "some_image:some_tag" + if "environment" not in payload: + payload["environment"] = { + "environment_kind": "CUSTOM", + "name": "Test", + "container_image": "some_image:some_tag", + } _, res = await sanic_client.post("/api/data/session_launchers", headers=user_headers, json=payload) @@ -43,6 +68,33 @@ async def create_session_launcher_helper(name: str, project_id: str, **payload) return create_session_launcher_helper +@pytest.fixture +def launch_session( + sanic_client: SanicASGITestClient, + user_headers: dict, + regular_user: UserInfo, + app_config: Config, + request: FixtureRequest, + event_loop: AbstractEventLoop, +): + async def launch_session_helper( + payload: dict, headers: dict = user_headers, user: UserInfo = regular_user + ) -> TestingResponse: + _, res = await sanic_client.post("/api/data/sessions", headers=headers, json=payload) + assert res.status_code == 201, res.text + assert res.json is not None + assert "name" in res.json + session_id: str = res.json.get("name", "unknown") + + def cleanup(): + event_loop.run_until_complete(app_config.nb_config.k8s_v2_client.delete_server(session_id, user.id)) + + # request.addfinalizer(cleanup) + return res + + return launch_session_helper + + @pytest.mark.asyncio async def test_get_all_session_environments( sanic_client: SanicASGITestClient, unauthorized_headers, create_session_environment @@ -110,7 +162,7 @@ async def test_post_session_environment_unauthorized(sanic_client: SanicASGITest _, res = await sanic_client.post("/api/data/environments", headers=user_headers, json=payload) - assert res.status_code == 403, res.text + assert res.status_code == 401, res.text @pytest.mark.asyncio @@ -150,7 +202,7 @@ async def test_patch_session_environment_unauthorized( _, res = await sanic_client.patch(f"/api/data/environments/{environment_id}", headers=user_headers, json=payload) - assert res.status_code == 403, res.text + assert res.status_code == 401, res.text @pytest.mark.asyncio @@ -217,8 +269,7 @@ async def test_get_session_launcher( "Some launcher", project_id=project["id"], description="Some launcher.", - environment_kind="global_environment", - environment_id=env["id"], + environment={"id": env["id"]}, ) launcher_id = launcher["id"] @@ -229,9 +280,10 @@ async def test_get_session_launcher( assert res.json.get("name") == "Some launcher" assert res.json.get("project_id") == project["id"] assert res.json.get("description") == "Some launcher." 
- assert res.json.get("environment_kind") == "global_environment" - assert res.json.get("environment_id") == env["id"] - assert res.json.get("container_image") is None + environment = res.json.get("environment", {}) + assert environment.get("environment_kind") == "GLOBAL" + assert environment.get("id") == env["id"] + assert environment.get("container_image") == env["container_image"] assert res.json.get("resource_class_id") is None @@ -276,9 +328,12 @@ async def test_post_session_launcher( "name": "Launcher 1", "project_id": project["id"], "description": "A session launcher.", - "environment_kind": "container_image", - "container_image": "some_image:some_tag", "resource_class_id": resource_pool["classes"][0]["id"], + "environment": { + "container_image": "some_image:some_tag", + "name": "custom_name", + "environment_kind": "CUSTOM", + }, } _, res = await sanic_client.post("/api/data/session_launchers", headers=admin_headers, json=payload) @@ -288,9 +343,10 @@ async def test_post_session_launcher( assert res.json.get("name") == "Launcher 1" assert res.json.get("project_id") == project["id"] assert res.json.get("description") == "A session launcher." - assert res.json.get("environment_kind") == "container_image" - assert res.json.get("container_image") == "some_image:some_tag" - assert res.json.get("environment_id") is None + environment = res.json.get("environment", {}) + assert environment.get("environment_kind") == "CUSTOM" + assert environment.get("container_image") == "some_image:some_tag" + assert environment.get("id") is not None assert res.json.get("resource_class_id") == resource_pool["classes"][0]["id"] @@ -303,20 +359,21 @@ async def test_post_session_launcher_unauthorized( create_project, create_resource_pool, regular_user, + create_session_environment, ) -> None: project = await create_project("Some project") resource_pool_data = valid_resource_pool_payload resource_pool_data["public"] = False resource_pool = await create_resource_pool(admin=True, **resource_pool_data) + environment = await create_session_environment("Test environment") payload = { "name": "Launcher 1", "project_id": project["id"], "description": "A session launcher.", - "environment_kind": "container_image", - "container_image": "some_image:some_tag", "resource_class_id": resource_pool["classes"][0]["id"], + "environment": {"id": environment["id"]}, } _, res = await sanic_client.post("/api/data/session_launchers", headers=user_headers, json=payload) @@ -338,3 +395,183 @@ async def test_delete_session_launcher( _, res = await sanic_client.delete(f"/api/data/session_launchers/{launcher_id}", headers=user_headers) assert res.status_code == 204, res.text + + +@pytest.mark.asyncio +async def test_patch_session_launcher( + sanic_client: SanicASGITestClient, + valid_resource_pool_payload: dict[str, Any], + user_headers, + create_project, + create_resource_pool, +) -> None: + project = await create_project("Some project 1") + resource_pool_data = valid_resource_pool_payload + resource_pool = await create_resource_pool(admin=True, **resource_pool_data) + + payload = { + "name": "Launcher 1", + "project_id": project["id"], + "description": "A session launcher.", + "resource_class_id": resource_pool["classes"][0]["id"], + "environment": { + "container_image": "some_image:some_tag", + "name": "custom_name", + "environment_kind": "CUSTOM", + }, + } + + _, res = await sanic_client.post("/api/data/session_launchers", headers=user_headers, json=payload) + + assert res.status_code == 201, res.text + assert res.json is not None 
+ assert res.json.get("name") == "Launcher 1" + assert res.json.get("description") == "A session launcher." + environment = res.json.get("environment", {}) + assert environment.get("environment_kind") == "CUSTOM" + assert environment.get("container_image") == "some_image:some_tag" + assert environment.get("id") is not None + assert res.json.get("resource_class_id") == resource_pool["classes"][0]["id"] + + patch_payload = { + "name": "New Name", + "description": "An updated session launcher.", + "resource_class_id": resource_pool["classes"][1]["id"], + } + _, res = await sanic_client.patch( + f"/api/data/session_launchers/{res.json['id']}", headers=user_headers, json=patch_payload + ) + assert res.status_code == 200, res.text + assert res.json is not None + assert res.json.get("name") == patch_payload["name"] + assert res.json.get("description") == patch_payload["description"] + assert res.json.get("resource_class_id") == patch_payload["resource_class_id"] + + +@pytest.mark.asyncio +async def test_patch_session_launcher_environment( + sanic_client: SanicASGITestClient, + valid_resource_pool_payload: dict[str, Any], + user_headers, + create_project, + create_resource_pool, + create_session_environment, +) -> None: + project = await create_project("Some project 1") + resource_pool_data = valid_resource_pool_payload + resource_pool = await create_resource_pool(admin=True, **resource_pool_data) + global_env = await create_session_environment("Some environment") + + # Create a new custom environment with the launcher + payload = { + "name": "Launcher 1", + "project_id": project["id"], + "description": "A session launcher.", + "resource_class_id": resource_pool["classes"][0]["id"], + "environment": { + "container_image": "some_image:some_tag", + "name": "custom_name", + "environment_kind": "CUSTOM", + }, + } + _, res = await sanic_client.post("/api/data/session_launchers", headers=user_headers, json=payload) + assert res.status_code == 201, res.text + assert res.json is not None + environment = res.json.get("environment", {}) + assert environment.get("environment_kind") == "CUSTOM" + assert environment.get("container_image") == "some_image:some_tag" + assert environment.get("id") is not None + + # Patch in a global environment + patch_payload = { + "environment": {"id": global_env["id"]}, + } + _, res = await sanic_client.patch( + f"/api/data/session_launchers/{res.json['id']}", headers=user_headers, json=patch_payload + ) + assert res.status_code == 200, res.text + assert res.json is not None + launcher_id = res.json["id"] + global_env["environment_kind"] = "GLOBAL" + assert res.json["environment"] == global_env + + # Trying to patch a field of the global environment should fail + patch_payload = { + "environment": {"container_image": "new_image"}, + } + _, res = await sanic_client.patch( + f"/api/data/session_launchers/{launcher_id}", headers=user_headers, json=patch_payload + ) + assert res.status_code == 422, res.text + + # Patching in a wholly new custom environment over the global is allowed + patch_payload = { + "environment": {"container_image": "new_image", "name": "new_custom", "environment_kind": "CUSTOM"}, + } + _, res = await sanic_client.patch( + f"/api/data/session_launchers/{launcher_id}", headers=user_headers, json=patch_payload + ) + assert res.status_code == 200, res.text + + # Should be able to patch some fields of the custom environment + patch_payload = { + "environment": {"container_image": "nginx:latest"}, + } + _, res = await sanic_client.patch( + 
f"/api/data/session_launchers/{launcher_id}", headers=user_headers, json=patch_payload + ) + assert res.status_code == 200, res.text + assert res.json["environment"]["container_image"] == "nginx:latest" + + +@pytest.fixture +def anonymous_user_headers() -> dict[str, str]: + return {"Renku-Auth-Anon-Id": "some-random-value-1234"} + + +@pytest.mark.asyncio +@pytest.mark.skip(reason="Setup for testing sessions is not done yet.") # TODO: enable in follwup PR +async def test_starting_session_anonymous( + sanic_client: SanicASGITestClient, + create_project, + create_session_launcher, + user_headers, + app_config: Config, + admin_headers, + launch_session, + anonymous_user_headers, +) -> None: + _, res = await sanic_client.post( + "/api/data/resource_pools", + json=ResourcePool.model_validate(app_config.default_resource_pool, from_attributes=True).model_dump( + mode="json", exclude_none=True + ), + headers=admin_headers, + ) + assert res.status_code == 201, res.text + project: dict[str, Any] = await create_project( + "Some project", + visibility="public", + repositories=["https://github.com/SwissDataScienceCenter/renku-data-services"], + ) + launcher: dict[str, Any] = await create_session_launcher( + "Launcher 1", + project_id=project["id"], + environment={ + "container_image": "renku/renkulab-py:3.10-0.23.0-amalthea-sessions-3", + "environment_kind": "CUSTOM", + "name": "test", + "port": 8888, + }, + ) + launcher_id = launcher["id"] + project_id = project["id"] + payload = {"project_id": project_id, "launcher_id": launcher_id} + session_res = await launch_session(payload, headers=anonymous_user_headers) + _, res = await sanic_client.get(f"/api/data/sessions/{session_res.json['name']}", headers=anonymous_user_headers) + assert res.status_code == 200, res.text + assert res.json["name"] == session_res.json["name"] + _, res = await sanic_client.get("/api/data/sessions", headers=anonymous_user_headers) + assert res.status_code == 200, res.text + assert len(res.json) > 0 + assert session_res.json["name"] in [i["name"] for i in res.json] diff --git a/test/bases/renku_data_services/data_api/test_storage_v2.py b/test/bases/renku_data_services/data_api/test_storage_v2.py deleted file mode 100644 index 5c34f72e6..000000000 --- a/test/bases/renku_data_services/data_api/test_storage_v2.py +++ /dev/null @@ -1,445 +0,0 @@ -from typing import Any, Optional - -import pytest - -from renku_data_services.users.models import UserInfo - - -@pytest.fixture -def project_members(member_1_user: UserInfo, member_2_user: UserInfo) -> list[dict[str, str]]: - """List of a project's members.""" - return [{"id": member_1_user.id, "role": "viewer"}, {"id": member_2_user.id, "role": "owner"}] - - -@pytest.fixture -def project_owner_member_headers(member_2_headers: dict[str, str]) -> dict[str, str]: - """Authentication headers for a normal project owner user.""" - return member_2_headers - - -@pytest.fixture -def project_non_member_headers(unauthorized_headers: dict[str, str]) -> dict[str, str]: - """Authentication headers for a user that isn't a member of a project.""" - return unauthorized_headers - - -@pytest.fixture -def project_normal_member_headers(member_1_headers: dict[str, str]) -> dict[str, str]: - """Authentication headers for a user that isn't a member of a project.""" - return member_1_headers - - -@pytest.fixture -def create_storage(sanic_client, user_headers, admin_headers, create_project, project_members): - async def create_storage_helper(project_id: Optional[str] = None, admin: bool = False, **payload) -> 
dict[str, Any]: - if not project_id: - project = await create_project("Project", members=project_members) - project_id = project["id"] - - headers = admin_headers if admin else user_headers - storage_payload = { - "project_id": project_id, - "name": "my-storage", - "configuration": { - "type": "s3", - "provider": "AWS", - "region": "us-east-1", - }, - "source_path": "bucket/my-folder", - "target_path": "my/target", - } - storage_payload.update(payload) - - _, response = await sanic_client.post("/api/data/storages_v2", headers=headers, json=storage_payload) - - assert response.status_code == 201, response.text - return response.json - - return create_storage_helper - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["admin_headers", "user_headers", "project_owner_member_headers"]) -async def test_storage_v2_can_create_as_admin_or_owner( - sanic_client, create_project, project_members, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - # Create some projects - await create_project("Project 1") - project = await create_project("Project 2", members=project_members) - await create_project("Project 3") - - payload = { - "project_id": project["id"], - "name": "my-storage", - "configuration": { - "type": "s3", - "provider": "AWS", - "region": "us-east-1", - }, - "source_path": "bucket/my-folder", - "target_path": "my/target", - } - - _, response = await sanic_client.post("/api/data/storages_v2", headers=headers, json=payload) - - assert response - assert response.status_code == 201 - assert response.json - assert response.json["storage"]["project_id"] == project["id"] - assert response.json["storage"]["storage_type"] == "s3" - assert response.json["storage"]["name"] == payload["name"] - assert response.json["storage"]["target_path"] == payload["target_path"] - - -@pytest.mark.asyncio -@pytest.mark.parametrize( - "headers_name", ["unauthorized_headers", "project_normal_member_headers", "project_non_member_headers"] -) -async def test_storage_v2_create_cannot_as_unauthorized_or_non_owner_or_non_member( - sanic_client, create_project, project_members, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - # Create some projects - await create_project("Project 1") - project = await create_project("Project 2", members=project_members) - await create_project("Project 3") - - payload = { - "project_id": project["id"], - "name": "my-storage", - "configuration": { - "type": "s3", - "provider": "AWS", - "region": "us-east-1", - }, - "source_path": "bucket/my-folder", - "target_path": "my/target", - } - - _, response = await sanic_client.post("/api/data/storages_v2", headers=headers, json=payload) - - assert response - assert response.status_code == 403 - - -@pytest.mark.asyncio -@pytest.mark.parametrize( - "headers_name", ["admin_headers", "user_headers", "project_normal_member_headers", "project_owner_member_headers"] -) -async def test_storage_v2_can_get_as_admin_or_project_members( - sanic_client, create_storage, create_project, project_members, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - await create_project("Project 1") - project_2 = await create_project("Project 2", members=project_members) - project_3 = await create_project("Project 3", members=project_members) - - project_2_id = project_2["id"] - - await create_storage(project_id=project_2_id) - - _, response = await sanic_client.get(f"/api/data/storages_v2?project_id={project_2_id}", headers=headers) - - assert 
response.status_code == 200 - assert len(response.json) == 1 - storage = response.json[0]["storage"] - assert storage["project_id"] == project_2_id - assert storage["storage_type"] == "s3" - assert storage["configuration"]["provider"] == "AWS" - - _, response = await sanic_client.get(f"/api/data/storages_v2?project_id={project_3['id']}", headers=headers) - - assert response.status_code == 200 - assert len(response.json) == 0 - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["unauthorized_headers", "project_non_member_headers"]) -async def test_storage_v2_cannot_get_as_unauthorized_or_non_member( - sanic_client, create_storage, create_project, project_members, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - project = await create_project("Project", members=project_members) - project_id = project["id"] - - await create_storage(project_id=project_id) - - _, response = await sanic_client.get(f"/api/data/storages_v2?project_id={project_id}", headers=headers) - - assert response.status_code == 200 - assert len(response.json) == 0 - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["user_headers", "project_owner_member_headers"]) -async def test_storage_v2_can_delete_as_owner(sanic_client, create_storage, headers_name, request) -> None: - headers = request.getfixturevalue(headers_name) - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - _, response = await sanic_client.delete(f"/api/data/storages_v2/{storage_id}", headers=headers) - - assert response.status_code == 204 - - _, response = await sanic_client.get(f"/api/data/storages_v2/{storage_id}", headers=headers) - - assert response.status_code == 404 - - -@pytest.mark.asyncio -async def test_storage_v2_cannot_delete_as_normal_member( - sanic_client, create_storage, project_normal_member_headers -) -> None: - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - _, response = await sanic_client.delete( - f"/api/data/storages_v2/{storage_id}", headers=project_normal_member_headers - ) - - assert response.status_code == 403 - - _, response = await sanic_client.get(f"/api/data/storages_v2/{storage_id}", headers=project_normal_member_headers) - - assert response.status_code == 200 - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["unauthorized_headers", "project_non_member_headers"]) -async def test_storage_v2_cannot_delete_as_unauthorized_or_non_member( - sanic_client, create_storage, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - _, response = await sanic_client.delete(f"/api/data/storages_v2/{storage_id}", headers=headers) - - assert response.status_code == 403, response.text - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["user_headers", "project_owner_member_headers"]) -async def test_storage_v2_can_patch_as_owner(sanic_client, create_storage, headers_name, request) -> None: - headers = request.getfixturevalue(headers_name) - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = { - "configuration": {"provider": "Other", "region": None, "endpoint": "https://test.com"}, - "source_path": "bucket/my-other-folder", - } - - _, response = await sanic_client.patch(f"/api/data/storages_v2/{storage_id}", headers=headers, json=payload) - - assert response.status_code == 200 - assert 
response.json["storage"]["configuration"]["provider"] == "Other" - assert response.json["storage"]["source_path"] == "bucket/my-other-folder" - assert "region" not in response.json["storage"]["configuration"] - - -@pytest.mark.asyncio -async def test_storage_v2_cannot_patch_as_normal_member( - sanic_client, create_storage, project_normal_member_headers -) -> None: - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = { - "configuration": {"provider": "Other", "region": None, "endpoint": "https://test.com"}, - "source_path": "bucket/my-other-folder", - } - - _, response = await sanic_client.patch( - f"/api/data/storages_v2/{storage_id}", headers=project_normal_member_headers, json=payload - ) - - assert response.status_code == 403 - - _, response = await sanic_client.get(f"/api/data/storages_v2/{storage_id}", headers=project_normal_member_headers) - - assert response.status_code == 200 - storage = response.json["storage"] - assert storage["configuration"]["provider"] == "AWS" - assert response.json["storage"]["source_path"] == "bucket/my-folder" - - -@pytest.mark.asyncio -@pytest.mark.parametrize("headers_name", ["unauthorized_headers", "project_non_member_headers"]) -async def test_storage_v2_cannot_patch_as_unauthorized_or_non_member( - sanic_client, create_storage, headers_name, request -) -> None: - headers = request.getfixturevalue(headers_name) - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = { - "configuration": {"provider": "Other", "region": None, "endpoint": "https://test.com"}, - "source_path": "bucket/my-other-folder", - } - - _, response = await sanic_client.patch(f"/api/data/storages_v2/{storage_id}", headers=headers, json=payload) - - assert response.status_code == 403, response.text - - -@pytest.mark.asyncio -async def test_storage_v2_is_deleted_if_project_is_deleted( - sanic_client, create_storage, create_project, user_headers -) -> None: - project = await create_project("Project") - project_id = project["id"] - storage = await create_storage(project_id=project_id) - storage_id = storage["storage"]["storage_id"] - - _, response = await sanic_client.delete(f"/api/data/projects/{project_id}", headers=user_headers) - - assert response.status_code == 204, response.text - - _, response = await sanic_client.get(f"/api/data/storages_v2/{storage_id}", headers=user_headers) - - # NOTE: If storage isn't deleted, the status code will be 401 - assert response.status_code == 404 - - -@pytest.mark.asyncio -async def test_storage_v2_create_secret( - sanic_client, create_storage, project_normal_member_headers, project_owner_member_headers -) -> None: - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = [ - {"name": "access_key_id", "value": "access key id value"}, - {"name": "secret_access_key", "value": "secret access key value"}, - ] - - _, response = await sanic_client.post( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers, json=payload - ) - - assert response.status_code == 201, response.json - assert {s["name"] for s in response.json} == {"access_key_id", "secret_access_key"}, response.json - created_secret_ids = {s["secret_id"] for s in response.json} - assert len(created_secret_ids) == 2, response.json - - # NOTE: Save secrets for the same storage for another user - payload = [ - {"name": "another_user_secret", "value": "another value"}, - ] - - _, response = await sanic_client.post( - 
f"/api/data/storages_v2/{storage_id}/secrets", headers=project_owner_member_headers, json=payload - ) - - assert response.status_code == 201, response.json - assert {s["name"] for s in response.json} == {"another_user_secret"}, response.json - - # NOTE: Get secrets for a storage - _, response = await sanic_client.get( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers - ) - - assert response.status_code == 200 - assert {s["name"] for s in response.json} == {"access_key_id", "secret_access_key"}, response.json - - # NOTE: Test that saved secrets are returned when getting a specific storage - _, response = await sanic_client.get(f"/api/data/storages_v2/{storage_id}", headers=project_normal_member_headers) - - assert response.status_code == 200 - assert "secrets" in response.json, response.json - assert {s["name"] for s in response.json["secrets"]} == {"access_key_id", "secret_access_key"}, response.json - assert {s["secret_id"] for s in response.json["secrets"]} == created_secret_ids, response.json - - # NOTE: Test that saved secrets are returned when getting all storages in a project - assert "project_id" in storage["storage"], storage - project_id = storage["storage"]["project_id"] - _, response = await sanic_client.get( - f"/api/data/storages_v2?project_id={project_id}", headers=project_normal_member_headers - ) - - assert response.status_code == 200 - assert len(response.json) == 1 - assert "secrets" in response.json[0], response.json - assert {s["name"] for s in response.json[0]["secrets"]} == {"access_key_id", "secret_access_key"}, response.json - assert {s["secret_id"] for s in response.json[0]["secrets"]} == created_secret_ids, response.json - - -@pytest.mark.asyncio -async def test_storage_v2_update_secret(sanic_client, create_storage, project_normal_member_headers) -> None: - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = [ - {"name": "access_key_id", "value": "access key id value"}, - {"name": "secret_access_key", "value": "secret access key value"}, - ] - - _, response = await sanic_client.post( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers, json=payload - ) - - assert response.status_code == 201, response.json - created_secret_ids = {s["secret_id"] for s in response.json} - - payload = [ - {"name": "access_key_id", "value": "new access key id value"}, - {"name": "secret_access_key", "value": "new secret access key value"}, - ] - - _, response = await sanic_client.post( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers, json=payload - ) - - assert response.status_code == 201, response.json - assert {s["name"] for s in response.json} == {"access_key_id", "secret_access_key"}, response.json - assert {s["secret_id"] for s in response.json} == created_secret_ids - - _, response = await sanic_client.get( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers - ) - - assert response.status_code == 200 - assert {s["name"] for s in response.json} == {"access_key_id", "secret_access_key"}, response.json - - -@pytest.mark.asyncio -async def test_storage_v2_delete_secret(sanic_client, create_storage, project_normal_member_headers) -> None: - storage = await create_storage() - storage_id = storage["storage"]["storage_id"] - - payload = [ - {"name": "access_key_id", "value": "access key id value"}, - {"name": "secret_access_key", "value": "secret access key value"}, - ] - - _, response = await 
sanic_client.post( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers, json=payload - ) - - assert response.status_code == 201, response.json - - _, response = await sanic_client.delete( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers - ) - - assert response.status_code == 204, response.json - - _, response = await sanic_client.get( - f"/api/data/storages_v2/{storage_id}/secrets", headers=project_normal_member_headers - ) - - assert response.status_code == 200 - assert {s["name"] for s in response.json} == set(), response.json - - # NOTE: Test that associated secrets are deleted - _, response = await sanic_client.get( - "/api/data/user/secrets", params={"kind": "storage"}, headers=project_normal_member_headers - ) - - assert response.status_code == 200 - assert response.json == [], response.json - - # TODO: Once saved secret sharing is implemented, add a test that makes sure shared secrets aren't deleted unless - # no other storage is using them diff --git a/test/bases/renku_data_services/data_api/utils.py b/test/bases/renku_data_services/data_api/utils.py index 3f08c39b5..f15040c04 100644 --- a/test/bases/renku_data_services/data_api/utils.py +++ b/test/bases/renku_data_services/data_api/utils.py @@ -1,6 +1,13 @@ import json +import os +import subprocess +from contextlib import AbstractContextManager from typing import Any +import yaml +from kubernetes import client as k8s_client +from kubernetes import config as k8s_config +from kubernetes import watch from sanic import Request from sanic_testing.testing import SanicASGITestClient, TestingResponse @@ -32,3 +39,123 @@ def merge_headers(*headers: dict[str, str]) -> dict[str, str]: for h in headers: all_headers.update(**h) return all_headers + + +class K3DCluster(AbstractContextManager): + """Context manager that will create and tear down a k3s cluster""" + + def __init__( + self, + cluster_name: str, + k3s_image="latest", + kubeconfig=".k3d-config.yaml", + extra_images=[], + ): + self.cluster_name = cluster_name + self.k3s_image = k3s_image + self.extra_images = extra_images + self.kubeconfig = kubeconfig + self.env = os.environ.copy() + self.env["KUBECONFIG"] = self.kubeconfig + + def __enter__(self): + """create kind cluster""" + + create_cluster = [ + "k3d", + "cluster", + "create", + self.cluster_name, + "--agents", + "1", + "--image", + self.k3s_image, + "--no-lb", + "--verbose", + "--wait", + "--k3s-arg", + "--disable=traefik@server:0", + "--k3s-arg", + "--disable=metrics-server@server:0", + ] + + try: + subprocess.run(create_cluster, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.env, check=True) + except subprocess.SubprocessError as err: + if err.output is not None: + print(err.output.decode()) + else: + print(err) + raise + + extra_commands = [] + + for extra_image in self.extra_images: + upload_image = [ + "k3d", + "image", + "import", + extra_image, + "-c", + self.cluster_name, + ] + + extra_commands.append(upload_image) + + for command in extra_commands: + try: + subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.env, check=True) + except subprocess.SubprocessError as err: + if err.output is not None: + print(err.output.decode()) + else: + print(err) + self._delete_cluster() + raise + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """delete kind cluster""" + + self._delete_cluster() + return False + + def _delete_cluster(self): + """delete kind cluster""" + + delete_cluster = ["k3d", 
"cluster", "delete", self.cluster_name] + subprocess.run(delete_cluster, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.env, check=True) + + def config_yaml(self): + with open(self.kubeconfig) as f: + return f.read() + + +def setup_amalthea(install_name: str, app_name: str, version: str, cluster: K3DCluster) -> None: + k8s_config.load_kube_config_from_dict(yaml.safe_load(cluster.config_yaml())) + + core_api = k8s_client.CoreV1Api() + + helm_cmds = [ + ["helm", "repo", "add", "renku", "https://swissdatasciencecenter.github.io/helm-charts"], + ["helm", "repo", "update"], + ["helm", "upgrade", "--install", install_name, f"renku/{app_name}", "--version", version], + ] + + for cmd in helm_cmds: + subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=cluster.env, check=True) + + watcher = watch.Watch() + + for event in watcher.stream( + core_api.list_namespaced_pod, + label_selector=f"app.kubernetes.io/name={app_name}", + namespace="default", + timeout_seconds=60, + ): + if event["object"].status.phase == "Running": + watcher.stop() + break + else: + assert False, "Timeout waiting on amalthea to run" diff --git a/test/components/renku_data_services/data_api/test_config.py b/test/components/renku_data_services/data_api/test_config.py index d0ba3ee17..20bb78542 100644 --- a/test/components/renku_data_services/data_api/test_config.py +++ b/test/components/renku_data_services/data_api/test_config.py @@ -77,6 +77,7 @@ def patch_kc_api(*args, **kwargs): DBConfig._async_engine = None +@pytest.mark.skip(reason="Re-enable when the k8s cluster for CI is fully setup") # TODO: address in followup PR def test_config_no_dummy(config_no_dummy_fixture: conf.Config) -> None: config = config_no_dummy_fixture assert config.authenticator is not None diff --git a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py index 479232be9..46da44210 100644 --- a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py +++ b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py @@ -19,48 +19,35 @@ from test.utils import create_storage -def get_user(storage, valid: bool = True): - """Get an api user for a storage.""" - if valid: - user = APIUser( - is_admin=True, - id="abcdefg", - access_token="abcdefg", - full_name="John Doe", # nosec: B106 - ) - user._admin_project_id = storage.get("project_id") - else: - user = APIUser( - is_admin=True, - id="abcdefg", - access_token="abcdefg", - full_name="John Doe", # nosec: B106 - ) - user._admin_project_id = storage.get("project_id") + "0" - user._member_project_id = storage.get("project_id") + "0" - return user +@pytest.fixture() +def user(): + return APIUser( + is_admin=True, + id="abcdefg", + access_token="abcdefg", + full_name="John Doe", # nosec: B106 + ) @given(storage=storage_strat()) @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None) @pytest.mark.asyncio -async def test_storage_insert_get(storage: dict[str, Any], app_config: Config) -> None: +async def test_storage_insert_get(storage: dict[str, Any], app_config: Config, user: APIUser) -> None: run_migrations_for_app("common") storage_repo = app_config.storage_repo with contextlib.suppress(ValidationError, errors.ValidationError): - await create_storage(storage, storage_repo, user=get_user(storage)) + await create_storage(storage, storage_repo, user=user) @given(storage=storage_strat(), new_source_path=a_path, new_target_path=a_path) 
diff --git a/test/components/renku_data_services/data_api/test_config.py b/test/components/renku_data_services/data_api/test_config.py
index d0ba3ee17..20bb78542 100644
--- a/test/components/renku_data_services/data_api/test_config.py
+++ b/test/components/renku_data_services/data_api/test_config.py
@@ -77,6 +77,7 @@ def patch_kc_api(*args, **kwargs):
     DBConfig._async_engine = None


+@pytest.mark.skip(reason="Re-enable when the k8s cluster for CI is fully setup")  # TODO: address in followup PR
 def test_config_no_dummy(config_no_dummy_fixture: conf.Config) -> None:
     config = config_no_dummy_fixture
     assert config.authenticator is not None
diff --git a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py
index 479232be9..46da44210 100644
--- a/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py
+++ b/test/components/renku_data_services/db/test_sqlalchemy_storage_repo.py
@@ -19,48 +19,35 @@
 from test.utils import create_storage


-def get_user(storage, valid: bool = True):
-    """Get an api user for a storage."""
-    if valid:
-        user = APIUser(
-            is_admin=True,
-            id="abcdefg",
-            access_token="abcdefg",
-            full_name="John Doe",  # nosec: B106
-        )
-        user._admin_project_id = storage.get("project_id")
-    else:
-        user = APIUser(
-            is_admin=True,
-            id="abcdefg",
-            access_token="abcdefg",
-            full_name="John Doe",  # nosec: B106
-        )
-        user._admin_project_id = storage.get("project_id") + "0"
-        user._member_project_id = storage.get("project_id") + "0"
-    return user
+@pytest.fixture()
+def user():
+    return APIUser(
+        is_admin=True,
+        id="abcdefg",
+        access_token="abcdefg",
+        full_name="John Doe",  # nosec: B106
+    )


 @given(storage=storage_strat())
 @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
 @pytest.mark.asyncio
-async def test_storage_insert_get(storage: dict[str, Any], app_config: Config) -> None:
+async def test_storage_insert_get(storage: dict[str, Any], app_config: Config, user: APIUser) -> None:
     run_migrations_for_app("common")
     storage_repo = app_config.storage_repo
     with contextlib.suppress(ValidationError, errors.ValidationError):
-        await create_storage(storage, storage_repo, user=get_user(storage))
+        await create_storage(storage, storage_repo, user=user)


 @given(storage=storage_strat(), new_source_path=a_path, new_target_path=a_path)
 @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
 @pytest.mark.asyncio
 async def test_storage_update_path(
-    storage: dict[str, Any], new_source_path: str, new_target_path: str, app_config: Config
+    storage: dict[str, Any], new_source_path: str, new_target_path: str, app_config: Config, user: APIUser
 ) -> None:
     run_migrations_for_app("common")
     storage_repo = app_config.storage_repo
     try:
-        user = user = get_user(storage)
         inserted_storage = await create_storage(storage, storage_repo, user)
         assert inserted_storage.storage_id is not None
@@ -77,11 +64,12 @@ async def test_storage_update_path(
 @given(storage=storage_strat(), new_config=st.one_of(s3_configuration(), azure_configuration()))
 @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
 @pytest.mark.asyncio
-async def test_storage_update_config(storage: dict[str, Any], new_config: dict[str, Any], app_config: Config) -> None:
+async def test_storage_update_config(
+    storage: dict[str, Any], new_config: dict[str, Any], app_config: Config, user: APIUser
+) -> None:
     run_migrations_for_app("common")
     storage_repo = app_config.storage_repo
     try:
-        user = user = get_user(storage)
         inserted_storage = await create_storage(storage, storage_repo, user)
         assert inserted_storage.storage_id is not None
@@ -97,11 +85,10 @@ async def test_storage_update_config(storage: dict[str, Any], new_config: dict[s
 @given(storage=storage_strat())
 @settings(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
 @pytest.mark.asyncio
-async def test_storage_delete(storage: dict[str, Any], app_config: Config) -> None:
+async def test_storage_delete(storage: dict[str, Any], app_config: Config, user: APIUser) -> None:
     run_migrations_for_app("common")
     storage_repo = app_config.storage_repo
     try:
-        user = user = get_user(storage)
         inserted_storage = await create_storage(storage, storage_repo, user)
         assert inserted_storage.storage_id is not None
         await storage_repo.delete_storage(storage_id=inserted_storage.storage_id, user=user)