From 3f33759b64f6730910b03ba7c64df5908052377e Mon Sep 17 00:00:00 2001 From: Divyansh Vijayvergia Date: Thu, 20 Nov 2025 16:41:42 +0000 Subject: [PATCH 1/2] test generation --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + databricks/sdk/__init__.py | 35 +- databricks/sdk/service/apps.py | 159 ++ databricks/sdk/service/billing.py | 253 ++- databricks/sdk/service/catalog.py | 251 ++- databricks/sdk/service/cleanrooms.py | 27 + databricks/sdk/service/compute.py | 528 ++++- databricks/sdk/service/dashboards.py | 705 +++++- databricks/sdk/service/database.py | 146 +- databricks/sdk/service/dataquality.py | 44 +- databricks/sdk/service/iam.py | 5 +- databricks/sdk/service/iamv2.py | 1005 ++++++++- databricks/sdk/service/jobs.py | 155 +- databricks/sdk/service/ml.py | 108 +- databricks/sdk/service/pipelines.py | 254 ++- databricks/sdk/service/postgres.py | 1974 +++++++++++++++++ databricks/sdk/service/qualitymonitorv2.py | 14 + databricks/sdk/service/settings.py | 152 +- databricks/sdk/service/sharing.py | 115 +- databricks/sdk/service/sql.py | 17 + databricks/sdk/service/tags.py | 233 ++ databricks/sdk/service/vectorsearch.py | 595 ++++- databricks/sdk/service/workspace.py | 33 +- docs/account/billing/budget_policy.rst | 4 +- docs/account/billing/index.rst | 3 +- docs/account/billing/usage_policy.rst | 69 + docs/account/iam/groups_v2.rst | 5 +- docs/account/iam/workspace_assignment.rst | 4 +- docs/account/iamv2/iam_v2.rst | 226 ++ docs/account/provisioning/credentials.rst | 6 +- docs/account/provisioning/storage.rst | 5 +- docs/dbdataclasses/apps.rst | 8 + docs/dbdataclasses/billing.rst | 8 + docs/dbdataclasses/catalog.rst | 36 +- docs/dbdataclasses/compute.rst | 54 + docs/dbdataclasses/dashboards.rst | 83 + docs/dbdataclasses/dataquality.rst | 10 + docs/dbdataclasses/iamv2.rst | 16 + docs/dbdataclasses/index.rst | 1 + docs/dbdataclasses/jobs.rst | 15 + docs/dbdataclasses/ml.rst | 8 + docs/dbdataclasses/pipelines.rst | 28 + docs/dbdataclasses/postgres.rst | 342 +++ docs/dbdataclasses/qualitymonitorv2.rst | 8 + docs/dbdataclasses/settings.rst | 20 + docs/dbdataclasses/tags.rst | 8 + docs/dbdataclasses/vectorsearch.rst | 71 + docs/dbdataclasses/workspace.rst | 8 + docs/gen-client-docs.py | 5 + docs/workspace/catalog/catalogs.rst | 15 +- docs/workspace/catalog/connections.rst | 8 +- docs/workspace/catalog/external_locations.rst | 13 +- docs/workspace/catalog/grants.rst | 4 +- docs/workspace/catalog/rfa.rst | 8 +- .../workspace/catalog/storage_credentials.rst | 6 +- docs/workspace/catalog/tables.rst | 2 +- docs/workspace/compute/instance_pools.rst | 18 +- docs/workspace/compute/libraries.rst | 77 + docs/workspace/dashboards/genie.rst | 85 +- docs/workspace/dashboards/index.rst | 3 +- .../dashboards/lakeview_embedded.rst | 10 + docs/workspace/database/database.rst | 28 +- docs/workspace/iam/current_user.rst | 2 +- docs/workspace/iamv2/workspace_iam_v2.rst | 218 ++ docs/workspace/index.rst | 1 + docs/workspace/jobs/jobs.rst | 42 +- docs/workspace/ml/experiments.rst | 10 + docs/workspace/ml/model_registry.rst | 13 +- docs/workspace/pipelines/pipelines.rst | 29 +- docs/workspace/postgres/index.rst | 10 + docs/workspace/postgres/postgres.rst | 232 ++ docs/workspace/settings/tokens.rst | 4 +- .../sharing/recipient_federation_policies.rst | 20 + docs/workspace/sharing/shares.rst | 12 +- docs/workspace/sql/queries.rst | 2 +- docs/workspace/tags/index.rst | 1 + docs/workspace/tags/tag_assignments.rst | 85 + .../vectorsearch/vector_search_endpoints.rst | 52 +- .../vectorsearch/vector_search_indexes.rst | 
10 + docs/workspace/workspace/workspace.rst | 25 +- tests/databricks/sdk/service/lrotesting.py | 46 +- tests/generated/test_json_marshall.py | 3 +- 83 files changed, 8771 insertions(+), 185 deletions(-) create mode 100755 databricks/sdk/service/postgres.py create mode 100644 docs/account/billing/usage_policy.rst create mode 100644 docs/dbdataclasses/postgres.rst create mode 100644 docs/workspace/postgres/index.rst create mode 100644 docs/workspace/postgres/postgres.rst create mode 100644 docs/workspace/tags/tag_assignments.rst diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ad1e20e88..39c153e74 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -27cebd58ae24e19c95c675db3a93b6046abaca2a \ No newline at end of file +universe:/home/divyansh.vijayvergia/universe \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 28ea4a477..6a827bc4d 100755 --- a/.gitattributes +++ b/.gitattributes @@ -22,6 +22,7 @@ databricks/sdk/service/marketplace.py linguist-generated=true databricks/sdk/service/ml.py linguist-generated=true databricks/sdk/service/oauth2.py linguist-generated=true databricks/sdk/service/pipelines.py linguist-generated=true +databricks/sdk/service/postgres.py linguist-generated=true databricks/sdk/service/provisioning.py linguist-generated=true databricks/sdk/service/qualitymonitorv2.py linguist-generated=true databricks/sdk/service/serving.py linguist-generated=true diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index da0509a95..fe5e345d9 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -32,6 +32,7 @@ from databricks.sdk.service import ml as pkg_ml from databricks.sdk.service import oauth2 as pkg_oauth2 from databricks.sdk.service import pipelines as pkg_pipelines +from databricks.sdk.service import postgres as pkg_postgres from databricks.sdk.service import provisioning as pkg_provisioning from databricks.sdk.service import qualitymonitorv2 as pkg_qualitymonitorv2 from databricks.sdk.service import serving as pkg_serving @@ -46,7 +47,7 @@ from databricks.sdk.service.apps import AppsAPI, AppsSettingsAPI from databricks.sdk.service.billing import (BillableUsageAPI, BudgetPolicyAPI, BudgetsAPI, LogDeliveryAPI, - UsageDashboardsAPI) + UsageDashboardsAPI, UsagePolicyAPI) from databricks.sdk.service.catalog import (AccountMetastoreAssignmentsAPI, AccountMetastoresAPI, AccountStorageCredentialsAPI, @@ -80,7 +81,8 @@ PolicyComplianceForClustersAPI, PolicyFamiliesAPI) from databricks.sdk.service.dashboards import (GenieAPI, LakeviewAPI, - LakeviewEmbeddedAPI) + LakeviewEmbeddedAPI, + QueryExecutionAPI) from databricks.sdk.service.database import DatabaseAPI from databricks.sdk.service.dataquality import DataQualityAPI from databricks.sdk.service.files import DbfsAPI, FilesAPI @@ -116,6 +118,7 @@ ServicePrincipalSecretsAPI, ServicePrincipalSecretsProxyAPI) from databricks.sdk.service.pipelines import PipelinesAPI +from databricks.sdk.service.postgres import PostgresAPI from databricks.sdk.service.provisioning import (CredentialsAPI, EncryptionKeysAPI, NetworksAPI, PrivateAccessAPI, @@ -156,7 +159,7 @@ QueryVisualizationsLegacyAPI, RedashConfigAPI, StatementExecutionAPI, WarehousesAPI) -from databricks.sdk.service.tags import TagPoliciesAPI +from databricks.sdk.service.tags import TagAssignmentsAPI, TagPoliciesAPI from databricks.sdk.service.vectorsearch import (VectorSearchEndpointsAPI, VectorSearchIndexesAPI) from databricks.sdk.service.workspace import (GitCredentialsAPI, ReposAPI, 
@@ -330,6 +333,7 @@ def __init__( self._policy_compliance_for_clusters = pkg_compute.PolicyComplianceForClustersAPI(self._api_client) self._policy_compliance_for_jobs = pkg_jobs.PolicyComplianceForJobsAPI(self._api_client) self._policy_families = pkg_compute.PolicyFamiliesAPI(self._api_client) + self._postgres = pkg_postgres.PostgresAPI(self._api_client) self._provider_exchange_filters = pkg_marketplace.ProviderExchangeFiltersAPI(self._api_client) self._provider_exchanges = pkg_marketplace.ProviderExchangesAPI(self._api_client) self._provider_files = pkg_marketplace.ProviderFilesAPI(self._api_client) @@ -344,6 +348,7 @@ def __init__( self._quality_monitors = pkg_catalog.QualityMonitorsAPI(self._api_client) self._queries = pkg_sql.QueriesAPI(self._api_client) self._queries_legacy = pkg_sql.QueriesLegacyAPI(self._api_client) + self._query_execution = pkg_dashboards.QueryExecutionAPI(self._api_client) self._query_history = pkg_sql.QueryHistoryAPI(self._api_client) self._query_visualizations = pkg_sql.QueryVisualizationsAPI(self._api_client) self._query_visualizations_legacy = pkg_sql.QueryVisualizationsLegacyAPI(self._api_client) @@ -373,6 +378,7 @@ def __init__( self._system_schemas = pkg_catalog.SystemSchemasAPI(self._api_client) self._table_constraints = pkg_catalog.TableConstraintsAPI(self._api_client) self._tables = pkg_catalog.TablesAPI(self._api_client) + self._tag_assignments = pkg_tags.TagAssignmentsAPI(self._api_client) self._tag_policies = pkg_tags.TagPoliciesAPI(self._api_client) self._temporary_path_credentials = pkg_catalog.TemporaryPathCredentialsAPI(self._api_client) self._temporary_table_credentials = pkg_catalog.TemporaryTableCredentialsAPI(self._api_client) @@ -744,6 +750,11 @@ def policy_families(self) -> pkg_compute.PolicyFamiliesAPI: """View available policy families.""" return self._policy_families + @property + def postgres(self) -> pkg_postgres.PostgresAPI: + """The Postgres API provides access to a Postgres database via REST API or direct SQL.""" + return self._postgres + @property def provider_exchange_filters(self) -> pkg_marketplace.ProviderExchangeFiltersAPI: """Marketplace exchanges filters curate which groups can access an exchange.""" @@ -804,6 +815,11 @@ def queries_legacy(self) -> pkg_sql.QueriesLegacyAPI: """These endpoints are used for CRUD operations on query definitions.""" return self._queries_legacy + @property + def query_execution(self) -> pkg_dashboards.QueryExecutionAPI: + """Query execution APIs for AI / BI Dashboards.""" + return self._query_execution + @property def query_history(self) -> pkg_sql.QueryHistoryAPI: """A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.""" @@ -856,7 +872,7 @@ def resource_quotas(self) -> pkg_catalog.ResourceQuotasAPI: @property def rfa(self) -> pkg_catalog.RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity Catalog securables.""" + """Request for Access enables users to request access for Unity Catalog securables.""" return self._rfa @property @@ -924,6 +940,11 @@ def tables(self) -> pkg_catalog.TablesAPI: """A table resides in the third layer of Unity Catalog’s three-level namespace.""" return self._tables + @property + def tag_assignments(self) -> pkg_tags.TagAssignmentsAPI: + """Manage tag assignments on workspace-scoped objects.""" + return self._tag_assignments + @property def tag_policies(self) -> pkg_tags.TagPoliciesAPI: """The Tag Policy API allows you to manage policies for 
governed tags in Databricks.""" @@ -1121,6 +1142,7 @@ def __init__( self._storage = pkg_provisioning.StorageAPI(self._api_client) self._storage_credentials = pkg_catalog.AccountStorageCredentialsAPI(self._api_client) self._usage_dashboards = pkg_billing.UsageDashboardsAPI(self._api_client) + self._usage_policy = pkg_billing.UsagePolicyAPI(self._api_client) self._users_v2 = pkg_iam.AccountUsersV2API(self._api_client) self._vpc_endpoints = pkg_provisioning.VpcEndpointsAPI(self._api_client) self._workspace_assignment = pkg_iam.WorkspaceAssignmentAPI(self._api_client) @@ -1278,6 +1300,11 @@ def usage_dashboards(self) -> pkg_billing.UsageDashboardsAPI: """These APIs manage usage dashboards for this account.""" return self._usage_dashboards + @property + def usage_policy(self) -> pkg_billing.UsagePolicyAPI: + """A service serves REST API about Usage policies.""" + return self._usage_policy + @property def users_v2(self) -> pkg_iam.AccountUsersV2API: """User identities recognized by Databricks and represented by email addresses.""" diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index f22edec9b..c3ff4a8ee 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -45,6 +45,8 @@ class App: creator: Optional[str] = None """The email of the user that created the app.""" + default_git_source: Optional[GitSource] = None + default_source_code_path: Optional[str] = None """The default workspace file system path of the source code from which app deployment are created. This field tracks the workspace source code path of the last active deployment.""" @@ -54,6 +56,8 @@ class App: effective_budget_policy_id: Optional[str] = None + effective_usage_policy_id: Optional[str] = None + effective_user_api_scopes: Optional[List[str]] = None """The effective api scopes granted to the user access token.""" @@ -86,6 +90,8 @@ class App: url: Optional[str] = None """The URL of the app once it is deployed.""" + usage_policy_id: Optional[str] = None + user_api_scopes: Optional[List[str]] = None def as_dict(self) -> dict: @@ -105,12 +111,16 @@ def as_dict(self) -> dict: body["create_time"] = self.create_time if self.creator is not None: body["creator"] = self.creator + if self.default_git_source: + body["default_git_source"] = self.default_git_source.as_dict() if self.default_source_code_path is not None: body["default_source_code_path"] = self.default_source_code_path if self.description is not None: body["description"] = self.description if self.effective_budget_policy_id is not None: body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.effective_user_api_scopes: body["effective_user_api_scopes"] = [v for v in self.effective_user_api_scopes] if self.id is not None: @@ -137,6 +147,8 @@ def as_dict(self) -> dict: body["updater"] = self.updater if self.url is not None: body["url"] = self.url + if self.usage_policy_id is not None: + body["usage_policy_id"] = self.usage_policy_id if self.user_api_scopes: body["user_api_scopes"] = [v for v in self.user_api_scopes] return body @@ -158,12 +170,16 @@ def as_shallow_dict(self) -> dict: body["create_time"] = self.create_time if self.creator is not None: body["creator"] = self.creator + if self.default_git_source: + body["default_git_source"] = self.default_git_source if self.default_source_code_path is not None: body["default_source_code_path"] = self.default_source_code_path if 
self.description is not None: body["description"] = self.description if self.effective_budget_policy_id is not None: body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.effective_user_api_scopes: body["effective_user_api_scopes"] = self.effective_user_api_scopes if self.id is not None: @@ -190,6 +206,8 @@ def as_shallow_dict(self) -> dict: body["updater"] = self.updater if self.url is not None: body["url"] = self.url + if self.usage_policy_id is not None: + body["usage_policy_id"] = self.usage_policy_id if self.user_api_scopes: body["user_api_scopes"] = self.user_api_scopes return body @@ -205,9 +223,11 @@ def from_dict(cls, d: Dict[str, Any]) -> App: compute_status=_from_dict(d, "compute_status", ComputeStatus), create_time=d.get("create_time", None), creator=d.get("creator", None), + default_git_source=_from_dict(d, "default_git_source", GitSource), default_source_code_path=d.get("default_source_code_path", None), description=d.get("description", None), effective_budget_policy_id=d.get("effective_budget_policy_id", None), + effective_usage_policy_id=d.get("effective_usage_policy_id", None), effective_user_api_scopes=d.get("effective_user_api_scopes", None), id=d.get("id", None), name=d.get("name", None), @@ -221,6 +241,7 @@ def from_dict(cls, d: Dict[str, Any]) -> App: update_time=d.get("update_time", None), updater=d.get("updater", None), url=d.get("url", None), + usage_policy_id=d.get("usage_policy_id", None), user_api_scopes=d.get("user_api_scopes", None), ) @@ -336,6 +357,8 @@ def from_dict(cls, d: Dict[str, Any]) -> AppAccessControlResponse: @dataclass class AppDeployment: + command: Optional[List[str]] = None + create_time: Optional[str] = None """The creation time of the deployment. 
Formatted timestamp in ISO 6801.""" @@ -348,6 +371,11 @@ class AppDeployment: deployment_id: Optional[str] = None """The unique id of the deployment.""" + env_vars: Optional[List[EnvVar]] = None + + git_source: Optional[GitSource] = None + """Git repository to use as the source for the app deployment.""" + mode: Optional[AppDeploymentMode] = None """The mode of which the deployment will manage the source code.""" @@ -367,6 +395,8 @@ class AppDeployment: def as_dict(self) -> dict: """Serializes the AppDeployment into a dictionary suitable for use as a JSON request body.""" body = {} + if self.command: + body["command"] = [v for v in self.command] if self.create_time is not None: body["create_time"] = self.create_time if self.creator is not None: @@ -375,6 +405,10 @@ def as_dict(self) -> dict: body["deployment_artifacts"] = self.deployment_artifacts.as_dict() if self.deployment_id is not None: body["deployment_id"] = self.deployment_id + if self.env_vars: + body["env_vars"] = [v.as_dict() for v in self.env_vars] + if self.git_source: + body["git_source"] = self.git_source.as_dict() if self.mode is not None: body["mode"] = self.mode.value if self.source_code_path is not None: @@ -388,6 +422,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the AppDeployment into a shallow dictionary of its immediate attributes.""" body = {} + if self.command: + body["command"] = self.command if self.create_time is not None: body["create_time"] = self.create_time if self.creator is not None: @@ -396,6 +432,10 @@ def as_shallow_dict(self) -> dict: body["deployment_artifacts"] = self.deployment_artifacts if self.deployment_id is not None: body["deployment_id"] = self.deployment_id + if self.env_vars: + body["env_vars"] = self.env_vars + if self.git_source: + body["git_source"] = self.git_source if self.mode is not None: body["mode"] = self.mode if self.source_code_path is not None: @@ -410,10 +450,13 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> AppDeployment: """Deserializes the AppDeployment from a dictionary.""" return cls( + command=d.get("command", None), create_time=d.get("create_time", None), creator=d.get("creator", None), deployment_artifacts=_from_dict(d, "deployment_artifacts", AppDeploymentArtifacts), deployment_id=d.get("deployment_id", None), + env_vars=_repeated_dict(d, "env_vars", EnvVar), + git_source=_from_dict(d, "git_source", GitSource), mode=_enum(d, "mode", AppDeploymentMode), source_code_path=d.get("source_code_path", None), status=_from_dict(d, "status", AppDeploymentStatus), @@ -1596,6 +1639,42 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomTemplate: ) +@dataclass +class EnvVar: + name: Optional[str] = None + + value: Optional[str] = None + + value_from: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the EnvVar into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + if self.value_from is not None: + body["value_from"] = self.value_from + return body + + def as_shallow_dict(self) -> dict: + """Serializes the EnvVar into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + if self.value_from is not None: + body["value_from"] = self.value_from + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> EnvVar: + """Deserializes the 
EnvVar from a dictionary.""" + return cls(name=d.get("name", None), value=d.get("value", None), value_from=d.get("value_from", None)) + + @dataclass class GetAppPermissionLevelsResponse: permission_levels: Optional[List[AppPermissionsDescription]] = None @@ -1621,6 +1700,86 @@ def from_dict(cls, d: Dict[str, Any]) -> GetAppPermissionLevelsResponse: return cls(permission_levels=_repeated_dict(d, "permission_levels", AppPermissionsDescription)) +@dataclass +class GitSource: + url: str + """URL of the Git repository.""" + + provider: str + """Git provider. Case insensitive. Supported values: gitHub, gitHubEnterprise, bitbucketCloud, + bitbucketServer, azureDevOpsServices, gitLab, gitLabEnterpriseEdition, awsCodeCommit.""" + + branch: Optional[str] = None + """Git branch to checkout.""" + + commit: Optional[str] = None + """Git commit SHA to checkout.""" + + resolved_commit: Optional[str] = None + """The resolved commit SHA that was actually used for the deployment. This is populated by the + system after resolving the reference (branch, tag, or commit). If commit is specified directly, + this will match commit. If a branch or tag is specified, this contains the commit SHA that the + branch or tag pointed to at deployment time.""" + + source_code_path: Optional[str] = None + """Relative path to the app source code within the Git repository. If not specified, the root of + the repository is used.""" + + tag: Optional[str] = None + """Git tag to checkout.""" + + def as_dict(self) -> dict: + """Serializes the GitSource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.branch is not None: + body["branch"] = self.branch + if self.commit is not None: + body["commit"] = self.commit + if self.provider is not None: + body["provider"] = self.provider + if self.resolved_commit is not None: + body["resolved_commit"] = self.resolved_commit + if self.source_code_path is not None: + body["source_code_path"] = self.source_code_path + if self.tag is not None: + body["tag"] = self.tag + if self.url is not None: + body["url"] = self.url + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GitSource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.branch is not None: + body["branch"] = self.branch + if self.commit is not None: + body["commit"] = self.commit + if self.provider is not None: + body["provider"] = self.provider + if self.resolved_commit is not None: + body["resolved_commit"] = self.resolved_commit + if self.source_code_path is not None: + body["source_code_path"] = self.source_code_path + if self.tag is not None: + body["tag"] = self.tag + if self.url is not None: + body["url"] = self.url + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GitSource: + """Deserializes the GitSource from a dictionary.""" + return cls( + branch=d.get("branch", None), + commit=d.get("commit", None), + provider=d.get("provider", None), + resolved_commit=d.get("resolved_commit", None), + source_code_path=d.get("source_code_path", None), + tag=d.get("tag", None), + url=d.get("url", None), + ) + + @dataclass class ListAppDeploymentsResponse: app_deployments: Optional[List[AppDeployment]] = None diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 77ef2f792..18704daa1 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -1034,6 +1034,50 @@ def from_dict(cls, d: Dict[str, Any]) -> ListBudgetPoliciesResponse: ) +@dataclass +class 
ListUsagePoliciesResponse: + """A list of usage policies.""" + + next_page_token: Optional[str] = None + """A token that can be sent as `page_token` to retrieve the next page.""" + + policies: Optional[List[UsagePolicy]] = None + + previous_page_token: Optional[str] = None + """A token that can be sent as `page_token` to retrieve the previous page.""" + + def as_dict(self) -> dict: + """Serializes the ListUsagePoliciesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.policies: + body["policies"] = [v.as_dict() for v in self.policies] + if self.previous_page_token is not None: + body["previous_page_token"] = self.previous_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListUsagePoliciesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.policies: + body["policies"] = self.policies + if self.previous_page_token is not None: + body["previous_page_token"] = self.previous_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListUsagePoliciesResponse: + """Deserializes the ListUsagePoliciesResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + policies=_repeated_dict(d, "policies", UsagePolicy), + previous_page_token=d.get("previous_page_token", None), + ) + + class LogDeliveryConfigStatus(Enum): """* Log Delivery Status @@ -1434,6 +1478,59 @@ class UsageDashboardType(Enum): USAGE_DASHBOARD_TYPE_WORKSPACE = "USAGE_DASHBOARD_TYPE_WORKSPACE" +@dataclass +class UsagePolicy: + """Contains the UsagePolicy details (same structure as BudgetPolicy)""" + + binding_workspace_ids: Optional[List[int]] = None + """List of workspaces that this usage policy will be exclusively bound to.""" + + custom_tags: Optional[List[compute.CustomPolicyTag]] = None + """A list of tags defined by the customer. At most 20 entries are allowed per policy.""" + + policy_id: Optional[str] = None + """The Id of the policy. 
This field is generated by Databricks and globally unique.""" + + policy_name: Optional[str] = None + """The name of the policy.""" + + def as_dict(self) -> dict: + """Serializes the UsagePolicy into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.binding_workspace_ids: + body["binding_workspace_ids"] = [v for v in self.binding_workspace_ids] + if self.custom_tags: + body["custom_tags"] = [v.as_dict() for v in self.custom_tags] + if self.policy_id is not None: + body["policy_id"] = self.policy_id + if self.policy_name is not None: + body["policy_name"] = self.policy_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UsagePolicy into a shallow dictionary of its immediate attributes.""" + body = {} + if self.binding_workspace_ids: + body["binding_workspace_ids"] = self.binding_workspace_ids + if self.custom_tags: + body["custom_tags"] = self.custom_tags + if self.policy_id is not None: + body["policy_id"] = self.policy_id + if self.policy_name is not None: + body["policy_name"] = self.policy_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UsagePolicy: + """Deserializes the UsagePolicy from a dictionary.""" + return cls( + binding_workspace_ids=d.get("binding_workspace_ids", None), + custom_tags=_repeated_dict(d, "custom_tags", compute.CustomPolicyTag), + policy_id=d.get("policy_id", None), + policy_name=d.get("policy_name", None), + ) + + @dataclass class WrappedLogDeliveryConfiguration: log_delivery_configuration: Optional[LogDeliveryConfiguration] = None @@ -1669,7 +1766,12 @@ def list( query["page_token"] = json["next_page_token"] def update( - self, policy_id: str, policy: BudgetPolicy, *, limit_config: Optional[LimitConfig] = None + self, + policy_id: str, + policy: BudgetPolicy, + *, + limit_config: Optional[LimitConfig] = None, + update_mask: Optional[str] = None, ) -> BudgetPolicy: """Updates a policy @@ -1680,6 +1782,8 @@ def update( specified even if not changed. The `policy_id` is used to identify the policy to update. :param limit_config: :class:`LimitConfig` (optional) DEPRECATED. This is redundant field as LimitConfig is part of the BudgetPolicy + :param update_mask: str (optional) + Field mask specifying which fields to update. When not provided, all fields are updated. :returns: :class:`BudgetPolicy` """ @@ -1688,6 +1792,8 @@ def update( query = {} if limit_config is not None: query["limit_config"] = limit_config.as_dict() + if update_mask is not None: + query["update_mask"] = update_mask headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -2074,3 +2180,148 @@ def get( res = self._api.do("GET", f"/api/2.0/accounts/{self._api.account_id}/dashboard", query=query, headers=headers) return GetBillingUsageDashboardResponse.from_dict(res) + + +class UsagePolicyAPI: + """A service serves REST API about Usage policies""" + + def __init__(self, api_client): + self._api = api_client + + def create(self, *, policy: Optional[UsagePolicy] = None, request_id: Optional[str] = None) -> UsagePolicy: + """Creates a new usage policy. + + :param policy: :class:`UsagePolicy` (optional) + The policy to create. `policy_id` needs to be empty as it will be generated + :param request_id: str (optional) + A unique identifier for this request. Restricted to 36 ASCII characters. 
+ + :returns: :class:`UsagePolicy` + """ + + if request_id is None or request_id == "": + request_id = str(uuid.uuid4()) + body = {} + if policy is not None: + body["policy"] = policy.as_dict() + if request_id is not None: + body["request_id"] = request_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", f"/api/2.1/accounts/{self._api.account_id}/usage-policies", body=body, headers=headers + ) + return UsagePolicy.from_dict(res) + + def delete(self, policy_id: str): + """Deletes a usage policy + + :param policy_id: str + The Id of the policy. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.1/accounts/{self._api.account_id}/usage-policies/{policy_id}", headers=headers) + + def get(self, policy_id: str) -> UsagePolicy: + """Retrieves a usage policy by it's ID. + + :param policy_id: str + The Id of the policy. + + :returns: :class:`UsagePolicy` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.1/accounts/{self._api.account_id}/usage-policies/{policy_id}", headers=headers + ) + return UsagePolicy.from_dict(res) + + def list( + self, + *, + filter_by: Optional[Filter] = None, + page_size: Optional[int] = None, + page_token: Optional[str] = None, + sort_spec: Optional[SortSpec] = None, + ) -> Iterator[UsagePolicy]: + """Lists all usage policies. Policies are returned in the alphabetically ascending order of their names. + + :param filter_by: :class:`Filter` (optional) + A filter to apply to the list of policies. + :param page_size: int (optional) + The maximum number of usage policies to return. + :param page_token: str (optional) + A page token, received from a previous `ListUsagePolicies` call. + :param sort_spec: :class:`SortSpec` (optional) + The sort specification. + + :returns: Iterator over :class:`UsagePolicy` + """ + + query = {} + if filter_by is not None: + query["filter_by"] = filter_by.as_dict() + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + if sort_spec is not None: + query["sort_spec"] = sort_spec.as_dict() + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", f"/api/2.1/accounts/{self._api.account_id}/usage-policies", query=query, headers=headers + ) + if "policies" in json: + for v in json["policies"]: + yield UsagePolicy.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def update(self, policy_id: str, policy: UsagePolicy, *, limit_config: Optional[LimitConfig] = None) -> UsagePolicy: + """Updates a usage policy + + :param policy_id: str + The Id of the policy. This field is generated by Databricks and globally unique. + :param policy: :class:`UsagePolicy` + The policy to update. `creator_user_id` cannot be specified in the request. + :param limit_config: :class:`LimitConfig` (optional) + DEPRECATED. 
This is redundant field as LimitConfig is part of the UsagePolicy + + :returns: :class:`UsagePolicy` + """ + + body = policy.as_dict() + query = {} + if limit_config is not None: + query["limit_config"] = limit_config.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.1/accounts/{self._api.account_id}/usage-policies/{policy_id}", + query=query, + body=body, + headers=headers, + ) + return UsagePolicy.from_dict(res) diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index a99c5405a..fb2f7e0be 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -23,9 +23,6 @@ @dataclass class AccessRequestDestinations: - destinations: List[NotificationDestination] - """The access request destinations for the securable.""" - securable: Securable """The securable for which the access request destinations are being retrieved.""" @@ -33,6 +30,9 @@ class AccessRequestDestinations: """Indicates whether any destinations are hidden from the caller due to a lack of permissions. This value is true if the caller does not have permission to see all destinations.""" + destinations: Optional[List[NotificationDestination]] = None + """The access request destinations for the securable.""" + def as_dict(self) -> dict: """Serializes the AccessRequestDestinations into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1042,12 +1042,18 @@ class CatalogInfo: connection_name: Optional[str] = None """The name of the connection to an external data source.""" + conversion_info: Optional[ConversionInfo] = None + """Status of conversion of FOREIGN catalog to UC Native catalog.""" + created_at: Optional[int] = None """Time at which this catalog was created, in epoch milliseconds.""" created_by: Optional[str] = None """Username of catalog creator.""" + dr_replication_info: Optional[DrReplicationInfo] = None + """Disaster Recovery replication state snapshot.""" + effective_predictive_optimization_flag: Optional[EffectivePredictiveOptimizationFlag] = None enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None @@ -1109,10 +1115,14 @@ def as_dict(self) -> dict: body["comment"] = self.comment if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.conversion_info: + body["conversion_info"] = self.conversion_info.as_dict() if self.created_at is not None: body["created_at"] = self.created_at if self.created_by is not None: body["created_by"] = self.created_by + if self.dr_replication_info: + body["dr_replication_info"] = self.dr_replication_info.as_dict() if self.effective_predictive_optimization_flag: body["effective_predictive_optimization_flag"] = self.effective_predictive_optimization_flag.as_dict() if self.enable_predictive_optimization is not None: @@ -1160,10 +1170,14 @@ def as_shallow_dict(self) -> dict: body["comment"] = self.comment if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.conversion_info: + body["conversion_info"] = self.conversion_info if self.created_at is not None: body["created_at"] = self.created_at if self.created_by is not None: body["created_by"] = self.created_by + if self.dr_replication_info: + body["dr_replication_info"] = self.dr_replication_info if self.effective_predictive_optimization_flag: body["effective_predictive_optimization_flag"] = self.effective_predictive_optimization_flag if self.enable_predictive_optimization is not 
None: @@ -1208,8 +1222,10 @@ def from_dict(cls, d: Dict[str, Any]) -> CatalogInfo: catalog_type=_enum(d, "catalog_type", CatalogType), comment=d.get("comment", None), connection_name=d.get("connection_name", None), + conversion_info=_from_dict(d, "conversion_info", ConversionInfo), created_at=d.get("created_at", None), created_by=d.get("created_by", None), + dr_replication_info=_from_dict(d, "dr_replication_info", DrReplicationInfo), effective_predictive_optimization_flag=_from_dict( d, "effective_predictive_optimization_flag", EffectivePredictiveOptimizationFlag ), @@ -1598,6 +1614,9 @@ class ConnectionInfo: credential_type: Optional[CredentialType] = None """The type of credential.""" + environment_settings: Optional[EnvironmentSettings] = None + """[Create,Update:OPT] Connection environment settings as EnvironmentSettings object.""" + full_name: Optional[str] = None """Full name of connection.""" @@ -1647,6 +1666,8 @@ def as_dict(self) -> dict: body["created_by"] = self.created_by if self.credential_type is not None: body["credential_type"] = self.credential_type.value + if self.environment_settings: + body["environment_settings"] = self.environment_settings.as_dict() if self.full_name is not None: body["full_name"] = self.full_name if self.metastore_id is not None: @@ -1688,6 +1709,8 @@ def as_shallow_dict(self) -> dict: body["created_by"] = self.created_by if self.credential_type is not None: body["credential_type"] = self.credential_type + if self.environment_settings: + body["environment_settings"] = self.environment_settings if self.full_name is not None: body["full_name"] = self.full_name if self.metastore_id is not None: @@ -1724,6 +1747,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: created_at=d.get("created_at", None), created_by=d.get("created_by", None), credential_type=_enum(d, "credential_type", CredentialType), + environment_settings=_from_dict(d, "environment_settings", EnvironmentSettings), full_name=d.get("full_name", None), metastore_id=d.get("metastore_id", None), name=d.get("name", None), @@ -1740,7 +1764,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: class ConnectionType(Enum): - """Next Id: 47""" + """Next Id: 48""" BIGQUERY = "BIGQUERY" DATABRICKS = "DATABRICKS" @@ -1813,6 +1837,39 @@ def from_dict(cls, d: Dict[str, Any]) -> ContinuousUpdateStatus: ) +@dataclass +class ConversionInfo: + """Status of conversion of FOREIGN entity into UC Native entity.""" + + state: Optional[ConversionInfoState] = None + """The conversion state of the resource.""" + + def as_dict(self) -> dict: + """Serializes the ConversionInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.state is not None: + body["state"] = self.state.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ConversionInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.state is not None: + body["state"] = self.state + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ConversionInfo: + """Deserializes the ConversionInfo from a dictionary.""" + return cls(state=_enum(d, "state", ConversionInfoState)) + + +class ConversionInfoState(Enum): + + COMPLETED = "COMPLETED" + IN_PROGRESS = "IN_PROGRESS" + + @dataclass class CreateAccessRequest: behalf_of: Optional[Principal] = None @@ -2997,6 +3054,38 @@ def from_dict(cls, d: Dict[str, Any]) -> DisableResponse: return cls() +@dataclass +class DrReplicationInfo: + """Metadata related to Disaster Recovery.""" + + status: 
Optional[DrReplicationStatus] = None + + def as_dict(self) -> dict: + """Serializes the DrReplicationInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.status is not None: + body["status"] = self.status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DrReplicationInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.status is not None: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DrReplicationInfo: + """Deserializes the DrReplicationInfo from a dictionary.""" + return cls(status=_enum(d, "status", DrReplicationStatus)) + + +class DrReplicationStatus(Enum): + + DR_REPLICATION_STATUS_PRIMARY = "DR_REPLICATION_STATUS_PRIMARY" + DR_REPLICATION_STATUS_SECONDARY = "DR_REPLICATION_STATUS_SECONDARY" + + @dataclass class EffectivePermissionsList: next_page_token: Optional[str] = None @@ -3267,6 +3356,38 @@ def from_dict(cls, d: Dict[str, Any]) -> EntityTagAssignment: ) +@dataclass +class EnvironmentSettings: + environment_version: Optional[str] = None + + java_dependencies: Optional[List[str]] = None + + def as_dict(self) -> dict: + """Serializes the EnvironmentSettings into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.environment_version is not None: + body["environment_version"] = self.environment_version + if self.java_dependencies: + body["java_dependencies"] = [v for v in self.java_dependencies] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the EnvironmentSettings into a shallow dictionary of its immediate attributes.""" + body = {} + if self.environment_version is not None: + body["environment_version"] = self.environment_version + if self.java_dependencies: + body["java_dependencies"] = self.java_dependencies + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> EnvironmentSettings: + """Deserializes the EnvironmentSettings from a dictionary.""" + return cls( + environment_version=d.get("environment_version", None), java_dependencies=d.get("java_dependencies", None) + ) + + @dataclass class ExternalLineageExternalMetadata: name: Optional[str] = None @@ -4836,6 +4957,8 @@ class GenerateTemporaryPathCredentialResponse: r2_temp_credentials: Optional[R2Credentials] = None + uc_encrypted_token: Optional[UcEncryptedToken] = None + url: Optional[str] = None """The URL of the storage path accessible by the temporary credential.""" @@ -4854,6 +4977,8 @@ def as_dict(self) -> dict: body["gcp_oauth_token"] = self.gcp_oauth_token.as_dict() if self.r2_temp_credentials: body["r2_temp_credentials"] = self.r2_temp_credentials.as_dict() + if self.uc_encrypted_token: + body["uc_encrypted_token"] = self.uc_encrypted_token.as_dict() if self.url is not None: body["url"] = self.url return body @@ -4873,6 +4998,8 @@ def as_shallow_dict(self) -> dict: body["gcp_oauth_token"] = self.gcp_oauth_token if self.r2_temp_credentials: body["r2_temp_credentials"] = self.r2_temp_credentials + if self.uc_encrypted_token: + body["uc_encrypted_token"] = self.uc_encrypted_token if self.url is not None: body["url"] = self.url return body @@ -4887,6 +5014,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenerateTemporaryPathCredentialResponse expiration_time=d.get("expiration_time", None), gcp_oauth_token=_from_dict(d, "gcp_oauth_token", GcpOauthToken), r2_temp_credentials=_from_dict(d, "r2_temp_credentials", R2Credentials), + uc_encrypted_token=_from_dict(d, "uc_encrypted_token", UcEncryptedToken), 
url=d.get("url", None), ) @@ -4965,6 +5093,8 @@ class GenerateTemporaryTableCredentialResponse: r2_temp_credentials: Optional[R2Credentials] = None + uc_encrypted_token: Optional[UcEncryptedToken] = None + url: Optional[str] = None """The URL of the storage path accessible by the temporary credential.""" @@ -4983,6 +5113,8 @@ def as_dict(self) -> dict: body["gcp_oauth_token"] = self.gcp_oauth_token.as_dict() if self.r2_temp_credentials: body["r2_temp_credentials"] = self.r2_temp_credentials.as_dict() + if self.uc_encrypted_token: + body["uc_encrypted_token"] = self.uc_encrypted_token.as_dict() if self.url is not None: body["url"] = self.url return body @@ -5002,6 +5134,8 @@ def as_shallow_dict(self) -> dict: body["gcp_oauth_token"] = self.gcp_oauth_token if self.r2_temp_credentials: body["r2_temp_credentials"] = self.r2_temp_credentials + if self.uc_encrypted_token: + body["uc_encrypted_token"] = self.uc_encrypted_token if self.url is not None: body["url"] = self.url return body @@ -5016,6 +5150,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenerateTemporaryTableCredentialRespons expiration_time=d.get("expiration_time", None), gcp_oauth_token=_from_dict(d, "gcp_oauth_token", GcpOauthToken), r2_temp_credentials=_from_dict(d, "r2_temp_credentials", R2Credentials), + uc_encrypted_token=_from_dict(d, "uc_encrypted_token", UcEncryptedToken), url=d.get("url", None), ) @@ -7640,6 +7775,15 @@ class PermissionsChange: """The principal whose privileges we are changing. Only one of principal or principal_id should be specified, never both at the same time.""" + principal_id: Optional[int] = None + """An opaque internal ID that identifies the principal whose privileges should be removed. + + This field is intended for removing privileges associated with a deleted user. When set, only + the entries specified in the remove field are processed; any entries in the add field will be + rejected. + + Only one of principal or principal_id should be specified, never both at the same time.""" + remove: Optional[List[Privilege]] = None """The set of privileges to remove.""" @@ -7650,6 +7794,8 @@ def as_dict(self) -> dict: body["add"] = [v.value for v in self.add] if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.remove: body["remove"] = [v.value for v in self.remove] return body @@ -7661,6 +7807,8 @@ def as_shallow_dict(self) -> dict: body["add"] = self.add if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.remove: body["remove"] = self.remove return body @@ -7671,6 +7819,7 @@ def from_dict(cls, d: Dict[str, Any]) -> PermissionsChange: return cls( add=_repeated_enum(d, "add", Privilege), principal=d.get("principal", None), + principal_id=d.get("principal_id", None), remove=_repeated_enum(d, "remove", Privilege), ) @@ -8058,6 +8207,10 @@ class PrivilegeAssignment: """The principal (user email address or group name). For deleted principals, `principal` is empty while `principal_id` is populated.""" + principal_id: Optional[int] = None + """Unique identifier of the principal. 
For active principals, both `principal` and `principal_id` + are present.""" + privileges: Optional[List[Privilege]] = None """The privileges assigned to the principal.""" @@ -8066,6 +8219,8 @@ def as_dict(self) -> dict: body = {} if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.privileges: body["privileges"] = [v.value for v in self.privileges] return body @@ -8075,6 +8230,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.privileges: body["privileges"] = self.privileges return body @@ -8082,7 +8239,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> PrivilegeAssignment: """Deserializes the PrivilegeAssignment from a dictionary.""" - return cls(principal=d.get("principal", None), privileges=_repeated_enum(d, "privileges", Privilege)) + return cls( + principal=d.get("principal", None), + principal_id=d.get("principal_id", None), + privileges=_repeated_enum(d, "privileges", Privilege), + ) @dataclass @@ -8745,7 +8906,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271""" + """Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" @@ -9822,6 +9983,34 @@ def from_dict(cls, d: Dict[str, Any]) -> TriggeredUpdateStatus: ) +@dataclass +class UcEncryptedToken: + """Encrypted token used when we cannot downscope the cloud provider token appropriately See: + https://docs.google.com/document/d/1hEKDnSckuU5PIS798CtfqBElrMR6OJuR2wgz_BjhMSY""" + + encrypted_payload: Optional[str] = None + """Stores encrypted ScopedCloudToken as a base64-encoded string""" + + def as_dict(self) -> dict: + """Serializes the UcEncryptedToken into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.encrypted_payload is not None: + body["encrypted_payload"] = self.encrypted_payload + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UcEncryptedToken into a shallow dictionary of its immediate attributes.""" + body = {} + if self.encrypted_payload is not None: + body["encrypted_payload"] = self.encrypted_payload + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UcEncryptedToken: + """Deserializes the UcEncryptedToken from a dictionary.""" + return cls(encrypted_payload=d.get("encrypted_payload", None)) + + @dataclass class UnassignResponse: def as_dict(self) -> dict: @@ -11057,6 +11246,8 @@ def create( *, comment: Optional[str] = None, connection_name: Optional[str] = None, + conversion_info: Optional[ConversionInfo] = None, + dr_replication_info: Optional[DrReplicationInfo] = None, options: Optional[Dict[str, str]] = None, properties: Optional[Dict[str, str]] = None, provider_name: Optional[str] = None, @@ -11072,6 +11263,10 @@ def create( User-provided free-form text description. :param connection_name: str (optional) The name of the connection to an external data source. + :param conversion_info: :class:`ConversionInfo` (optional) + Status of conversion of FOREIGN catalog to UC Native catalog. + :param dr_replication_info: :class:`DrReplicationInfo` (optional) + Disaster Recovery replication state snapshot. 
:param options: Dict[str,str] (optional) A map of key-value properties attached to the securable. :param properties: Dict[str,str] (optional) @@ -11093,6 +11288,10 @@ def create( body["comment"] = comment if connection_name is not None: body["connection_name"] = connection_name + if conversion_info is not None: + body["conversion_info"] = conversion_info.as_dict() + if dr_replication_info is not None: + body["dr_replication_info"] = dr_replication_info.as_dict() if name is not None: body["name"] = name if options is not None: @@ -11226,6 +11425,8 @@ def update( name: str, *, comment: Optional[str] = None, + conversion_info: Optional[ConversionInfo] = None, + dr_replication_info: Optional[DrReplicationInfo] = None, enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None, isolation_mode: Optional[CatalogIsolationMode] = None, new_name: Optional[str] = None, @@ -11240,6 +11441,10 @@ def update( The name of the catalog. :param comment: str (optional) User-provided free-form text description. + :param conversion_info: :class:`ConversionInfo` (optional) + Status of conversion of FOREIGN catalog to UC Native catalog. + :param dr_replication_info: :class:`DrReplicationInfo` (optional) + Disaster Recovery replication state snapshot. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) Whether predictive optimization should be enabled for this object and objects under it. :param isolation_mode: :class:`CatalogIsolationMode` (optional) @@ -11259,6 +11464,10 @@ def update( body = {} if comment is not None: body["comment"] = comment + if conversion_info is not None: + body["conversion_info"] = conversion_info.as_dict() + if dr_replication_info is not None: + body["dr_replication_info"] = dr_replication_info.as_dict() if enable_predictive_optimization is not None: body["enable_predictive_optimization"] = enable_predictive_optimization.value if isolation_mode is not None: @@ -11300,6 +11509,7 @@ def create( options: Dict[str, str], *, comment: Optional[str] = None, + environment_settings: Optional[EnvironmentSettings] = None, properties: Optional[Dict[str, str]] = None, read_only: Optional[bool] = None, ) -> ConnectionInfo: @@ -11316,6 +11526,8 @@ def create( A map of key-value properties attached to the securable. :param comment: str (optional) User-provided free-form text description. + :param environment_settings: :class:`EnvironmentSettings` (optional) + [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. :param read_only: bool (optional) @@ -11329,6 +11541,8 @@ def create( body["comment"] = comment if connection_type is not None: body["connection_type"] = connection_type.value + if environment_settings is not None: + body["environment_settings"] = environment_settings.as_dict() if name is not None: body["name"] = name if options is not None: @@ -11418,7 +11632,13 @@ def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = query["page_token"] = json["next_page_token"] def update( - self, name: str, options: Dict[str, str], *, new_name: Optional[str] = None, owner: Optional[str] = None + self, + name: str, + options: Dict[str, str], + *, + environment_settings: Optional[EnvironmentSettings] = None, + new_name: Optional[str] = None, + owner: Optional[str] = None, ) -> ConnectionInfo: """Updates the connection that matches the supplied name. 
@@ -11426,6 +11646,8 @@ def update( Name of the connection. :param options: Dict[str,str] A map of key-value properties attached to the securable. + :param environment_settings: :class:`EnvironmentSettings` (optional) + [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param new_name: str (optional) New name for the connection. :param owner: str (optional) @@ -11435,6 +11657,8 @@ def update( """ body = {} + if environment_settings is not None: + body["environment_settings"] = environment_settings.as_dict() if new_name is not None: body["new_name"] = new_name if options is not None: @@ -12759,6 +12983,7 @@ def get( securable_type: str, full_name: str, *, + include_deleted_principals: Optional[bool] = None, max_results: Optional[int] = None, page_token: Optional[str] = None, principal: Optional[str] = None, @@ -12776,6 +13001,8 @@ def get( Type of securable. :param full_name: str Full name of securable. + :param include_deleted_principals: bool (optional) + Optional. If true, also return privilege assignments whose principals have been deleted. :param max_results: int (optional) Specifies the maximum number of privileges to return (page length). Every PrivilegeAssignment present in a single page response is guaranteed to contain all the privileges granted on the @@ -12795,6 +13022,8 @@ def get( """ query = {} + if include_deleted_principals is not None: + query["include_deleted_principals"] = include_deleted_principals if max_results is not None: query["max_results"] = max_results if page_token is not None: @@ -14650,12 +14879,10 @@ def list_quotas( class RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + """Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations.""" + These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations.""" def __init__(self, api_client): self._api = api_client diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index 68f5f5712..f4aafd275 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -32,6 +32,11 @@ class CleanRoom: created_at: Optional[int] = None """When the clean room was created, in epoch milliseconds.""" + enable_shared_output: Optional[bool] = None + """Whether allow task to write to shared output schema. 
When enabled, clean room task runs + triggered by the current collaborator can write to the run-scoped shared output schema which is + accessible by all collaborators.""" + local_collaborator_alias: Optional[str] = None """The alias of the collaborator tied to the local clean room.""" @@ -67,6 +72,8 @@ def as_dict(self) -> dict: body["comment"] = self.comment if self.created_at is not None: body["created_at"] = self.created_at + if self.enable_shared_output is not None: + body["enable_shared_output"] = self.enable_shared_output if self.local_collaborator_alias is not None: body["local_collaborator_alias"] = self.local_collaborator_alias if self.name is not None: @@ -92,6 +99,8 @@ def as_shallow_dict(self) -> dict: body["comment"] = self.comment if self.created_at is not None: body["created_at"] = self.created_at + if self.enable_shared_output is not None: + body["enable_shared_output"] = self.enable_shared_output if self.local_collaborator_alias is not None: body["local_collaborator_alias"] = self.local_collaborator_alias if self.name is not None: @@ -115,6 +124,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoom: access_restricted=_enum(d, "access_restricted", CleanRoomAccessRestricted), comment=d.get("comment", None), created_at=d.get("created_at", None), + enable_shared_output=d.get("enable_shared_output", None), local_collaborator_alias=d.get("local_collaborator_alias", None), name=d.get("name", None), output_catalog=_from_dict(d, "output_catalog", CleanRoomOutputCatalog), @@ -809,6 +819,13 @@ class CleanRoomNotebookTaskRun: run_duration: Optional[int] = None """Duration of the task run, in milliseconds.""" + shared_output_schema_expiration_time: Optional[int] = None + """Expiration time of the shared output schema of the task run (if any), in epoch milliseconds.""" + + shared_output_schema_name: Optional[str] = None + """Name of the shared output schema associated with the clean rooms notebook task run. 
This schema + is accessible by all collaborators when enable_shared_output is true.""" + start_time: Optional[int] = None """When the task run started, in epoch milliseconds.""" @@ -831,6 +848,10 @@ def as_dict(self) -> dict: body["output_schema_name"] = self.output_schema_name if self.run_duration is not None: body["run_duration"] = self.run_duration + if self.shared_output_schema_expiration_time is not None: + body["shared_output_schema_expiration_time"] = self.shared_output_schema_expiration_time + if self.shared_output_schema_name is not None: + body["shared_output_schema_name"] = self.shared_output_schema_name if self.start_time is not None: body["start_time"] = self.start_time return body @@ -854,6 +875,10 @@ def as_shallow_dict(self) -> dict: body["output_schema_name"] = self.output_schema_name if self.run_duration is not None: body["run_duration"] = self.run_duration + if self.shared_output_schema_expiration_time is not None: + body["shared_output_schema_expiration_time"] = self.shared_output_schema_expiration_time + if self.shared_output_schema_name is not None: + body["shared_output_schema_name"] = self.shared_output_schema_name if self.start_time is not None: body["start_time"] = self.start_time return body @@ -870,6 +895,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomNotebookTaskRun: output_schema_expiration_time=d.get("output_schema_expiration_time", None), output_schema_name=d.get("output_schema_name", None), run_duration=d.get("run_duration", None), + shared_output_schema_expiration_time=d.get("shared_output_schema_expiration_time", None), + shared_output_schema_name=d.get("shared_output_schema_name", None), start_time=d.get("start_time", None), ) diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 64e6f4b0a..ac4850883 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -5,6 +5,7 @@ import logging import random import time +import uuid from dataclasses import dataclass from datetime import timedelta from enum import Enum @@ -171,7 +172,7 @@ class AwsAttributes: be of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone id if the Databricks deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and - if not specified, a default zone will be used. If the zone specified is "auto", will try to + if not specified, the zone "auto" will be used. If the zone specified is "auto", will try to place cluster in a zone with high availability, and will retry placement in a different AZ if there is not enough capacity. @@ -327,6 +328,13 @@ class AzureAvailability(Enum): SPOT_WITH_FALLBACK_AZURE = "SPOT_WITH_FALLBACK_AZURE" +class BaseEnvironmentType(Enum): + """If changed, also update estore/namespaces/defaultbaseenvironments/latest.proto""" + + CPU = "CPU" + GPU = "GPU" + + @dataclass class CancelResponse: def as_dict(self) -> dict: @@ -2720,6 +2728,184 @@ def from_dict(cls, d: Dict[str, Any]) -> DbfsStorageInfo: return cls(destination=d.get("destination", None)) +@dataclass +class DefaultBaseEnvironment: + base_environment_cache: Optional[List[DefaultBaseEnvironmentCache]] = None + + base_environment_type: Optional[BaseEnvironmentType] = None + + created_timestamp: Optional[int] = None + + creator_user_id: Optional[int] = None + + environment: Optional[Environment] = None + """Note: we made `environment` non-internal because we need to expose its `client` field. 
All other + fields should be treated as internal.""" + + filepath: Optional[str] = None + + id: Optional[str] = None + + is_default: Optional[bool] = None + + last_updated_timestamp: Optional[int] = None + + last_updated_user_id: Optional[int] = None + + message: Optional[str] = None + + name: Optional[str] = None + + principal_ids: Optional[List[int]] = None + + status: Optional[DefaultBaseEnvironmentCacheStatus] = None + + def as_dict(self) -> dict: + """Serializes the DefaultBaseEnvironment into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.base_environment_cache: + body["base_environment_cache"] = [v.as_dict() for v in self.base_environment_cache] + if self.base_environment_type is not None: + body["base_environment_type"] = self.base_environment_type.value + if self.created_timestamp is not None: + body["created_timestamp"] = self.created_timestamp + if self.creator_user_id is not None: + body["creator_user_id"] = self.creator_user_id + if self.environment: + body["environment"] = self.environment.as_dict() + if self.filepath is not None: + body["filepath"] = self.filepath + if self.id is not None: + body["id"] = self.id + if self.is_default is not None: + body["is_default"] = self.is_default + if self.last_updated_timestamp is not None: + body["last_updated_timestamp"] = self.last_updated_timestamp + if self.last_updated_user_id is not None: + body["last_updated_user_id"] = self.last_updated_user_id + if self.message is not None: + body["message"] = self.message + if self.name is not None: + body["name"] = self.name + if self.principal_ids: + body["principal_ids"] = [v for v in self.principal_ids] + if self.status is not None: + body["status"] = self.status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DefaultBaseEnvironment into a shallow dictionary of its immediate attributes.""" + body = {} + if self.base_environment_cache: + body["base_environment_cache"] = self.base_environment_cache + if self.base_environment_type is not None: + body["base_environment_type"] = self.base_environment_type + if self.created_timestamp is not None: + body["created_timestamp"] = self.created_timestamp + if self.creator_user_id is not None: + body["creator_user_id"] = self.creator_user_id + if self.environment: + body["environment"] = self.environment + if self.filepath is not None: + body["filepath"] = self.filepath + if self.id is not None: + body["id"] = self.id + if self.is_default is not None: + body["is_default"] = self.is_default + if self.last_updated_timestamp is not None: + body["last_updated_timestamp"] = self.last_updated_timestamp + if self.last_updated_user_id is not None: + body["last_updated_user_id"] = self.last_updated_user_id + if self.message is not None: + body["message"] = self.message + if self.name is not None: + body["name"] = self.name + if self.principal_ids: + body["principal_ids"] = self.principal_ids + if self.status is not None: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DefaultBaseEnvironment: + """Deserializes the DefaultBaseEnvironment from a dictionary.""" + return cls( + base_environment_cache=_repeated_dict(d, "base_environment_cache", DefaultBaseEnvironmentCache), + base_environment_type=_enum(d, "base_environment_type", BaseEnvironmentType), + created_timestamp=d.get("created_timestamp", None), + creator_user_id=d.get("creator_user_id", None), + environment=_from_dict(d, "environment", Environment), + filepath=d.get("filepath", None), + 
id=d.get("id", None), + is_default=d.get("is_default", None), + last_updated_timestamp=d.get("last_updated_timestamp", None), + last_updated_user_id=d.get("last_updated_user_id", None), + message=d.get("message", None), + name=d.get("name", None), + principal_ids=d.get("principal_ids", None), + status=_enum(d, "status", DefaultBaseEnvironmentCacheStatus), + ) + + +@dataclass +class DefaultBaseEnvironmentCache: + indefinite_materialized_environment: Optional[MaterializedEnvironment] = None + + materialized_environment: Optional[MaterializedEnvironment] = None + + message: Optional[str] = None + + status: Optional[DefaultBaseEnvironmentCacheStatus] = None + + def as_dict(self) -> dict: + """Serializes the DefaultBaseEnvironmentCache into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.indefinite_materialized_environment: + body["indefinite_materialized_environment"] = self.indefinite_materialized_environment.as_dict() + if self.materialized_environment: + body["materialized_environment"] = self.materialized_environment.as_dict() + if self.message is not None: + body["message"] = self.message + if self.status is not None: + body["status"] = self.status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DefaultBaseEnvironmentCache into a shallow dictionary of its immediate attributes.""" + body = {} + if self.indefinite_materialized_environment: + body["indefinite_materialized_environment"] = self.indefinite_materialized_environment + if self.materialized_environment: + body["materialized_environment"] = self.materialized_environment + if self.message is not None: + body["message"] = self.message + if self.status is not None: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DefaultBaseEnvironmentCache: + """Deserializes the DefaultBaseEnvironmentCache from a dictionary.""" + return cls( + indefinite_materialized_environment=_from_dict( + d, "indefinite_materialized_environment", MaterializedEnvironment + ), + materialized_environment=_from_dict(d, "materialized_environment", MaterializedEnvironment), + message=d.get("message", None), + status=_enum(d, "status", DefaultBaseEnvironmentCacheStatus), + ) + + +class DefaultBaseEnvironmentCacheStatus(Enum): + + CREATED = "CREATED" + EXPIRED = "EXPIRED" + FAILED = "FAILED" + INVALID = "INVALID" + PENDING = "PENDING" + REFRESHING = "REFRESHING" + + @dataclass class DeleteClusterResponse: def as_dict(self) -> dict: @@ -3828,6 +4014,10 @@ class GetInstancePool: disk_spec: Optional[DiskSpec] = None """Defines the specification of the disks that will be attached to all spark containers.""" + enable_auto_alternate_node_types: Optional[bool] = None + """For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids + are enabled. This field should not be true if node_type_flexibility is set.""" + enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. 
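A hedged sketch of creating a pool that opts into auto-generated alternate node types (per the field docs in this patch, this flag is mutually exclusive with `node_type_flexibility`). The pool name and node type are placeholders.

    # Sketch: Fleet-V2 style pool with auto alternate node types enabled.
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    pool = w.instance_pools.create(
        instance_pool_name="flex-pool",           # placeholder name
        node_type_id="m5.xlarge",                 # placeholder node type
        min_idle_instances=0,
        max_capacity=10,
        enable_auto_alternate_node_types=True,    # do not also set node_type_flexibility
    )
    print(pool.instance_pool_id)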
In AWS, this feature @@ -3857,6 +4047,11 @@ class GetInstancePool: min_idle_instances: Optional[int] = None """Minimum number of idle instances to keep in the instance pool""" + node_type_flexibility: Optional[NodeTypeFlexibility] = None + """For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true.""" + node_type_id: Optional[str] = None """This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -3901,6 +4096,8 @@ def as_dict(self) -> dict: body["default_tags"] = self.default_tags if self.disk_spec: body["disk_spec"] = self.disk_spec.as_dict() + if self.enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = self.enable_auto_alternate_node_types if self.enable_elastic_disk is not None: body["enable_elastic_disk"] = self.enable_elastic_disk if self.gcp_attributes: @@ -3915,6 +4112,8 @@ def as_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility.as_dict() if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -3946,6 +4145,8 @@ def as_shallow_dict(self) -> dict: body["default_tags"] = self.default_tags if self.disk_spec: body["disk_spec"] = self.disk_spec + if self.enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = self.enable_auto_alternate_node_types if self.enable_elastic_disk is not None: body["enable_elastic_disk"] = self.enable_elastic_disk if self.gcp_attributes: @@ -3960,6 +4161,8 @@ def as_shallow_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -3987,6 +4190,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetInstancePool: custom_tags=d.get("custom_tags", None), default_tags=d.get("default_tags", None), disk_spec=_from_dict(d, "disk_spec", DiskSpec), + enable_auto_alternate_node_types=d.get("enable_auto_alternate_node_types", None), enable_elastic_disk=d.get("enable_elastic_disk", None), gcp_attributes=_from_dict(d, "gcp_attributes", InstancePoolGcpAttributes), idle_instance_autotermination_minutes=d.get("idle_instance_autotermination_minutes", None), @@ -3994,6 +4198,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetInstancePool: instance_pool_name=d.get("instance_pool_name", None), max_capacity=d.get("max_capacity", None), min_idle_instances=d.get("min_idle_instances", None), + node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility), node_type_id=d.get("node_type_id", None), preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage), preloaded_spark_versions=d.get("preloaded_spark_versions", None), @@ -4639,6 +4844,10 @@ class InstancePoolAndStats: disk_spec: Optional[DiskSpec] = None """Defines the specification of the disks that will be attached to all spark 
containers.""" + enable_auto_alternate_node_types: Optional[bool] = None + """For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids + are enabled. This field should not be true if node_type_flexibility is set.""" + enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. In AWS, this feature @@ -4671,6 +4880,11 @@ class InstancePoolAndStats: min_idle_instances: Optional[int] = None """Minimum number of idle instances to keep in the instance pool""" + node_type_flexibility: Optional[NodeTypeFlexibility] = None + """For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true.""" + node_type_id: Optional[str] = None """This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or @@ -4715,6 +4929,8 @@ def as_dict(self) -> dict: body["default_tags"] = self.default_tags if self.disk_spec: body["disk_spec"] = self.disk_spec.as_dict() + if self.enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = self.enable_auto_alternate_node_types if self.enable_elastic_disk is not None: body["enable_elastic_disk"] = self.enable_elastic_disk if self.gcp_attributes: @@ -4729,6 +4945,8 @@ def as_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility.as_dict() if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -4760,6 +4978,8 @@ def as_shallow_dict(self) -> dict: body["default_tags"] = self.default_tags if self.disk_spec: body["disk_spec"] = self.disk_spec + if self.enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = self.enable_auto_alternate_node_types if self.enable_elastic_disk is not None: body["enable_elastic_disk"] = self.enable_elastic_disk if self.gcp_attributes: @@ -4774,6 +4994,8 @@ def as_shallow_dict(self) -> dict: body["max_capacity"] = self.max_capacity if self.min_idle_instances is not None: body["min_idle_instances"] = self.min_idle_instances + if self.node_type_flexibility: + body["node_type_flexibility"] = self.node_type_flexibility if self.node_type_id is not None: body["node_type_id"] = self.node_type_id if self.preloaded_docker_images: @@ -4801,6 +5023,7 @@ def from_dict(cls, d: Dict[str, Any]) -> InstancePoolAndStats: custom_tags=d.get("custom_tags", None), default_tags=d.get("default_tags", None), disk_spec=_from_dict(d, "disk_spec", DiskSpec), + enable_auto_alternate_node_types=d.get("enable_auto_alternate_node_types", None), enable_elastic_disk=d.get("enable_elastic_disk", None), gcp_attributes=_from_dict(d, "gcp_attributes", InstancePoolGcpAttributes), idle_instance_autotermination_minutes=d.get("idle_instance_autotermination_minutes", None), @@ -4808,6 +5031,7 @@ def from_dict(cls, d: Dict[str, Any]) -> InstancePoolAndStats: instance_pool_name=d.get("instance_pool_name", None), max_capacity=d.get("max_capacity", None), 
min_idle_instances=d.get("min_idle_instances", None), + node_type_flexibility=_from_dict(d, "node_type_flexibility", NodeTypeFlexibility), node_type_id=d.get("node_type_id", None), preloaded_docker_images=_repeated_dict(d, "preloaded_docker_images", DockerImage), preloaded_spark_versions=d.get("preloaded_spark_versions", None), @@ -5688,6 +5912,39 @@ class ListClustersSortByField(Enum): DEFAULT = "DEFAULT" +@dataclass +class ListDefaultBaseEnvironmentsResponse: + default_base_environments: Optional[List[DefaultBaseEnvironment]] = None + + next_page_token: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the ListDefaultBaseEnvironmentsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.default_base_environments: + body["default_base_environments"] = [v.as_dict() for v in self.default_base_environments] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListDefaultBaseEnvironmentsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.default_base_environments: + body["default_base_environments"] = self.default_base_environments + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListDefaultBaseEnvironmentsResponse: + """Deserializes the ListDefaultBaseEnvironmentsResponse from a dictionary.""" + return cls( + default_base_environments=_repeated_dict(d, "default_base_environments", DefaultBaseEnvironment), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ListGlobalInitScriptsResponse: scripts: Optional[List[GlobalInitScriptDetails]] = None @@ -5956,6 +6213,44 @@ def from_dict(cls, d: Dict[str, Any]) -> LogSyncStatus: MapAny = Dict[str, Any] +@dataclass +class MaterializedEnvironment: + """Materialized Environment information enables environment sharing and reuse via Environment + Caching during library installations. Currently this feature is only supported for Python + libraries. + + - If the env cache entry in LMv2 DB doesn't exist or invalid, library installations and + environment materialization will occur. A new Materialized Environment metadata will be sent + from DP upon successful library installations and env materialization, and is persisted into + database by LMv2. - If the env cache entry in LMv2 DB is valid, the Materialized Environment + will be sent to DP by LMv2, and DP will restore the cached environment from a store instead of + reinstalling libraries from scratch. 
+ + If changed, also update estore/namespaces/defaultbaseenvironments/latest.proto with new version""" + + last_updated_timestamp: Optional[int] = None + """The timestamp (in epoch milliseconds) when the materialized env is updated.""" + + def as_dict(self) -> dict: + """Serializes the MaterializedEnvironment into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.last_updated_timestamp is not None: + body["last_updated_timestamp"] = self.last_updated_timestamp + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MaterializedEnvironment into a shallow dictionary of its immediate attributes.""" + body = {} + if self.last_updated_timestamp is not None: + body["last_updated_timestamp"] = self.last_updated_timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MaterializedEnvironment: + """Deserializes the MaterializedEnvironment from a dictionary.""" + return cls(last_updated_timestamp=d.get("last_updated_timestamp", None)) + + @dataclass class MavenLibrary: coordinates: str @@ -6250,6 +6545,28 @@ def from_dict(cls, d: Dict[str, Any]) -> NodeType: ) +@dataclass +class NodeTypeFlexibility: + """For Fleet-V2 using classic clusters, this object contains the information about the alternate + node type ids to use when attempting to launch a cluster. It can be used with both the driver + and worker node types.""" + + def as_dict(self) -> dict: + """Serializes the NodeTypeFlexibility into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NodeTypeFlexibility into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NodeTypeFlexibility: + """Deserializes the NodeTypeFlexibility from a dictionary.""" + return cls() + + @dataclass class PendingInstanceError: """Error message of a failed pending instances""" @@ -6562,6 +6879,24 @@ def from_dict(cls, d: Dict[str, Any]) -> RCranLibrary: return cls(package=d.get("package", None), repo=d.get("repo", None)) +@dataclass +class RefreshDefaultBaseEnvironmentsResponse: + def as_dict(self) -> dict: + """Serializes the RefreshDefaultBaseEnvironmentsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RefreshDefaultBaseEnvironmentsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RefreshDefaultBaseEnvironmentsResponse: + """Deserializes the RefreshDefaultBaseEnvironmentsResponse from a dictionary.""" + return cls() + + @dataclass class RemoveResponse: def as_dict(self) -> dict: @@ -9882,11 +10217,13 @@ def create( azure_attributes: Optional[InstancePoolAzureAttributes] = None, custom_tags: Optional[Dict[str, str]] = None, disk_spec: Optional[DiskSpec] = None, + enable_auto_alternate_node_types: Optional[bool] = None, enable_elastic_disk: Optional[bool] = None, gcp_attributes: Optional[InstancePoolGcpAttributes] = None, idle_instance_autotermination_minutes: Optional[int] = None, max_capacity: Optional[int] = None, min_idle_instances: Optional[int] = None, + node_type_flexibility: Optional[NodeTypeFlexibility] = None, preloaded_docker_images: Optional[List[DockerImage]] = None, preloaded_spark_versions: Optional[List[str]] = None, remote_disk_throughput: Optional[int] = None, @@ -9915,6 +10252,9 @@ def create( - 
Currently, Databricks allows at most 45 custom tags :param disk_spec: :class:`DiskSpec` (optional) Defines the specification of the disks that will be attached to all spark containers. + :param enable_auto_alternate_node_types: bool (optional) + For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids are + enabled. This field should not be true if node_type_flexibility is set. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. In AWS, this feature @@ -9934,6 +10274,10 @@ def create( upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true. :param preloaded_docker_images: List[:class:`DockerImage`] (optional) Custom Docker Image BYOC :param preloaded_spark_versions: List[str] (optional) @@ -9959,6 +10303,8 @@ def create( body["custom_tags"] = custom_tags if disk_spec is not None: body["disk_spec"] = disk_spec.as_dict() + if enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = enable_auto_alternate_node_types if enable_elastic_disk is not None: body["enable_elastic_disk"] = enable_elastic_disk if gcp_attributes is not None: @@ -9971,6 +10317,8 @@ def create( body["max_capacity"] = max_capacity if min_idle_instances is not None: body["min_idle_instances"] = min_idle_instances + if node_type_flexibility is not None: + body["node_type_flexibility"] = node_type_flexibility.as_dict() if node_type_id is not None: body["node_type_id"] = node_type_id if preloaded_docker_images is not None: @@ -10015,9 +10363,11 @@ def edit( node_type_id: str, *, custom_tags: Optional[Dict[str, str]] = None, + enable_auto_alternate_node_types: Optional[bool] = None, idle_instance_autotermination_minutes: Optional[int] = None, max_capacity: Optional[int] = None, min_idle_instances: Optional[int] = None, + node_type_flexibility: Optional[NodeTypeFlexibility] = None, remote_disk_throughput: Optional[int] = None, total_initial_remote_disk_size: Optional[int] = None, ): @@ -10038,6 +10388,9 @@ def edit( EBS volumes) with these tags in addition to `default_tags`. Notes: - Currently, Databricks allows at most 45 custom tags + :param enable_auto_alternate_node_types: bool (optional) + For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids are + enabled. This field should not be true if node_type_flexibility is set. :param idle_instance_autotermination_minutes: int (optional) Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. If not set, the extra pool instances @@ -10050,6 +10403,10 @@ def edit( upsize requests. 
:param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true. :param remote_disk_throughput: int (optional) If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported for GCP HYPERDISK_BALANCED types. @@ -10063,6 +10420,8 @@ def edit( body = {} if custom_tags is not None: body["custom_tags"] = custom_tags + if enable_auto_alternate_node_types is not None: + body["enable_auto_alternate_node_types"] = enable_auto_alternate_node_types if idle_instance_autotermination_minutes is not None: body["idle_instance_autotermination_minutes"] = idle_instance_autotermination_minutes if instance_pool_id is not None: @@ -10073,6 +10432,8 @@ def edit( body["max_capacity"] = max_capacity if min_idle_instances is not None: body["min_idle_instances"] = min_idle_instances + if node_type_flexibility is not None: + body["node_type_flexibility"] = node_type_flexibility.as_dict() if node_type_id is not None: body["node_type_id"] = node_type_id if remote_disk_throughput is not None: @@ -10422,6 +10783,75 @@ def cluster_status(self, cluster_id: str) -> Iterator[LibraryFullStatus]: parsed = ClusterLibraryStatuses.from_dict(json).library_statuses return parsed if parsed is not None else [] + def create_default_base_environment( + self, default_base_environment: DefaultBaseEnvironment, *, request_id: Optional[str] = None + ) -> DefaultBaseEnvironment: + """Create a default base environment within workspaces to define the environment version and a list of + dependencies to be used in serverless notebooks and jobs. This process will asynchronously generate a + cache to optimize dependency resolution. + + :param default_base_environment: :class:`DefaultBaseEnvironment` + :param request_id: str (optional) + A unique identifier for this request. A random UUID is recommended. This request is only idempotent + if a `request_id` is provided. + + :returns: :class:`DefaultBaseEnvironment` + """ + + if request_id is None or request_id == "": + request_id = str(uuid.uuid4()) + body = {} + if default_base_environment is not None: + body["default_base_environment"] = default_base_environment.as_dict() + if request_id is not None: + body["request_id"] = request_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/default-base-environments", body=body, headers=headers) + return DefaultBaseEnvironment.from_dict(res) + + def delete_default_base_environment(self, id: str): + """Delete the default base environment given an ID. The default base environment may be used by + downstream workloads. Please ensure that the deletion is intentional. + + :param id: str + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/default-base-environments/{id}", headers=headers) + + def get_default_base_environment(self, id: str, *, trace_id: Optional[str] = None) -> DefaultBaseEnvironment: + """Return the default base environment details for a given ID. 
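A minimal sketch of the new default base environment endpoints: create one, then fetch it by ID. It assumes the Libraries API is reachable as `w.libraries` and that `compute.Environment` accepts a `client` value and a `dependencies` list; the dependency pin is illustrative.

    # Sketch: create a default base environment and read it back.
    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import compute

    w = WorkspaceClient()
    dbe = w.libraries.create_default_base_environment(
        compute.DefaultBaseEnvironment(
            name="team-default",
            base_environment_type=compute.BaseEnvironmentType.CPU,
            environment=compute.Environment(client="1", dependencies=["pandas==2.2.2"]),
        )
    )
    fetched = w.libraries.get_default_base_environment(dbe.id)
    print(fetched.status)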
+ + :param id: str + :param trace_id: str (optional) + Deprecated: use ctx.requestId instead + + :returns: :class:`DefaultBaseEnvironment` + """ + + query = {} + if id is not None: + query["id"] = id + if trace_id is not None: + query["trace_id"] = trace_id + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", "/api/2.0/default-base-environments:getDefaultBaseEnvironment", query=query, headers=headers + ) + return DefaultBaseEnvironment.from_dict(res) + def install(self, cluster_id: str, libraries: List[Library]): """Add libraries to install on a cluster. The installation is asynchronous; it happens in the background after the completion of this request. @@ -10446,6 +10876,54 @@ def install(self, cluster_id: str, libraries: List[Library]): self._api.do("POST", "/api/2.0/libraries/install", body=body, headers=headers) + def list_default_base_environments( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[DefaultBaseEnvironment]: + """List default base environments defined in the workspaces for the requested user. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`DefaultBaseEnvironment` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", "/api/2.0/default-base-environments", query=query, headers=headers) + if "default_base_environments" in json: + for v in json["default_base_environments"]: + yield DefaultBaseEnvironment.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def refresh_default_base_environments(self, ids: List[str]): + """Refresh the cached default base environments for the given IDs. This process will asynchronously + regenerate the caches. The existing caches remains available until it expires. + + :param ids: List[str] + + + """ + + body = {} + if ids is not None: + body["ids"] = [v for v in ids] + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + self._api.do("POST", "/api/2.0/default-base-environments/refresh", body=body, headers=headers) + def uninstall(self, cluster_id: str, libraries: List[Library]): """Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is restarted. A request to uninstall a library that is not currently installed is ignored. @@ -10470,6 +10948,54 @@ def uninstall(self, cluster_id: str, libraries: List[Library]): self._api.do("POST", "/api/2.0/libraries/uninstall", body=body, headers=headers) + def update_default_base_environment( + self, id: str, default_base_environment: DefaultBaseEnvironment + ) -> DefaultBaseEnvironment: + """Update the default base environment for the given ID. This process will asynchronously regenerate the + cache. The existing cache remains available until it expires. 
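A short sketch of the paginated list helper together with a cache refresh, using only the methods added above; page size is arbitrary.

    # Sketch: refresh the caches of all default base environments visible to the caller.
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    ids = [dbe.id for dbe in w.libraries.list_default_base_environments(page_size=50) if dbe.id]
    if ids:
        w.libraries.refresh_default_base_environments(ids)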
+ + :param id: str + :param default_base_environment: :class:`DefaultBaseEnvironment` + + :returns: :class:`DefaultBaseEnvironment` + """ + + body = {} + if default_base_environment is not None: + body["default_base_environment"] = default_base_environment.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/default-base-environments/{id}", body=body, headers=headers) + return DefaultBaseEnvironment.from_dict(res) + + def update_default_default_base_environment( + self, *, base_environment_type: Optional[BaseEnvironmentType] = None, id: Optional[str] = None + ) -> DefaultBaseEnvironment: + """Set the default base environment for the workspace. This marks the specified DBE as the workspace + default. + + :param base_environment_type: :class:`BaseEnvironmentType` (optional) + :param id: str (optional) + + :returns: :class:`DefaultBaseEnvironment` + """ + + body = {} + if base_environment_type is not None: + body["base_environment_type"] = base_environment_type.value + if id is not None: + body["id"] = id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/default-base-environments:setDefault", body=body, headers=headers) + return DefaultBaseEnvironment.from_dict(res) + class PolicyComplianceForClustersAPI: """The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 5bf772f27..b7e68de0e 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -103,6 +103,72 @@ def from_dict(cls, d: Dict[str, Any]) -> AuthorizationDetailsGrantRule: return cls(permission_set=d.get("permission_set", None)) +@dataclass +class CancelQueryExecutionResponse: + status: Optional[List[CancelQueryExecutionResponseStatus]] = None + + def as_dict(self) -> dict: + """Serializes the CancelQueryExecutionResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.status: + body["status"] = [v.as_dict() for v in self.status] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CancelQueryExecutionResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.status: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CancelQueryExecutionResponse: + """Deserializes the CancelQueryExecutionResponse from a dictionary.""" + return cls(status=_repeated_dict(d, "status", CancelQueryExecutionResponseStatus)) + + +@dataclass +class CancelQueryExecutionResponseStatus: + data_token: str + """The token to poll for result asynchronously Example: + EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" + + pending: Optional[Empty] = None + + success: Optional[Empty] = None + + def as_dict(self) -> dict: + """Serializes the CancelQueryExecutionResponseStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.data_token is not None: + body["data_token"] = self.data_token + if self.pending: + body["pending"] = self.pending.as_dict() + if self.success: + body["success"] = self.success.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CancelQueryExecutionResponseStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.data_token is not None: + 
body["data_token"] = self.data_token + if self.pending: + body["pending"] = self.pending + if self.success: + body["success"] = self.success + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CancelQueryExecutionResponseStatus: + """Deserializes the CancelQueryExecutionResponseStatus from a dictionary.""" + return cls( + data_token=d.get("data_token", None), + pending=_from_dict(d, "pending", Empty), + success=_from_dict(d, "success", Empty), + ) + + @dataclass class CronSchedule: quartz_cron_expression: str @@ -254,6 +320,45 @@ class DashboardView(Enum): DASHBOARD_VIEW_BASIC = "DASHBOARD_VIEW_BASIC" +@dataclass +class Empty: + """Represents an empty message, similar to google.protobuf.Empty, which is not available in the + firm right now.""" + + def as_dict(self) -> dict: + """Serializes the Empty into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Empty into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Empty: + """Deserializes the Empty from a dictionary.""" + return cls() + + +@dataclass +class ExecuteQueryResponse: + def as_dict(self) -> dict: + """Serializes the ExecuteQueryResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ExecuteQueryResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ExecuteQueryResponse: + """Deserializes the ExecuteQueryResponse from a dictionary.""" + return cls() + + @dataclass class GenieAttachment: """Genie AI Response""" @@ -426,12 +531,17 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieConversationSummary: class GenieFeedback: """Feedback containing rating and optional comment""" + comment: Optional[str] = None + """Optional feedback comment text""" + rating: Optional[GenieFeedbackRating] = None """The feedback rating""" def as_dict(self) -> dict: """Serializes the GenieFeedback into a dictionary suitable for use as a JSON request body.""" body = {} + if self.comment is not None: + body["comment"] = self.comment if self.rating is not None: body["rating"] = self.rating.value return body @@ -439,6 +549,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the GenieFeedback into a shallow dictionary of its immediate attributes.""" body = {} + if self.comment is not None: + body["comment"] = self.comment if self.rating is not None: body["rating"] = self.rating return body @@ -446,7 +558,7 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> GenieFeedback: """Deserializes the GenieFeedback from a dictionary.""" - return cls(rating=_enum(d, "rating", GenieFeedbackRating)) + return cls(comment=d.get("comment", None), rating=_enum(d, "rating", GenieFeedbackRating)) class GenieFeedbackRating(Enum): @@ -457,6 +569,57 @@ class GenieFeedbackRating(Enum): POSITIVE = "POSITIVE" +@dataclass +class GenieGenerateDownloadFullQueryResultResponse: + download_id: Optional[str] = None + """Download ID. 
Use this ID to track the download request in subsequent polling calls""" + + def as_dict(self) -> dict: + """Serializes the GenieGenerateDownloadFullQueryResultResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.download_id is not None: + body["download_id"] = self.download_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GenieGenerateDownloadFullQueryResultResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.download_id is not None: + body["download_id"] = self.download_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GenieGenerateDownloadFullQueryResultResponse: + """Deserializes the GenieGenerateDownloadFullQueryResultResponse from a dictionary.""" + return cls(download_id=d.get("download_id", None)) + + +@dataclass +class GenieGetDownloadFullQueryResultResponse: + statement_response: Optional[sql.StatementResponse] = None + """SQL Statement Execution response. See [Get status, manifest, and result first + chunk](:method:statementexecution/getstatement) for more details.""" + + def as_dict(self) -> dict: + """Serializes the GenieGetDownloadFullQueryResultResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.statement_response: + body["statement_response"] = self.statement_response.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GenieGetDownloadFullQueryResultResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.statement_response: + body["statement_response"] = self.statement_response + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GenieGetDownloadFullQueryResultResponse: + """Deserializes the GenieGetDownloadFullQueryResultResponse from a dictionary.""" + return cls(statement_response=_from_dict(d, "statement_response", sql.StatementResponse)) + + @dataclass class GenieGetMessageQueryResultResponse: statement_response: Optional[sql.StatementResponse] = None @@ -946,6 +1109,24 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieSuggestedQuestionsAttachment: return cls(questions=d.get("questions", None)) +@dataclass +class GetPublishedDashboardEmbeddedResponse: + def as_dict(self) -> dict: + """Serializes the GetPublishedDashboardEmbeddedResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GetPublishedDashboardEmbeddedResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GetPublishedDashboardEmbeddedResponse: + """Deserializes the GetPublishedDashboardEmbeddedResponse from a dictionary.""" + return cls() + + @dataclass class GetPublishedDashboardTokenInfoResponse: authorization_details: Optional[List[AuthorizationDetails]] = None @@ -1222,6 +1403,80 @@ class MessageStatus(Enum): SUBMITTED = "SUBMITTED" +@dataclass +class PendingStatus: + data_token: str + """The token to poll for result asynchronously Example: + EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" + + def as_dict(self) -> dict: + """Serializes the PendingStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.data_token is not None: + body["data_token"] = self.data_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PendingStatus into a shallow dictionary of its immediate 
attributes.""" + body = {} + if self.data_token is not None: + body["data_token"] = self.data_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PendingStatus: + """Deserializes the PendingStatus from a dictionary.""" + return cls(data_token=d.get("data_token", None)) + + +@dataclass +class PollQueryStatusResponse: + data: Optional[List[PollQueryStatusResponseData]] = None + + def as_dict(self) -> dict: + """Serializes the PollQueryStatusResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.data: + body["data"] = [v.as_dict() for v in self.data] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PollQueryStatusResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.data: + body["data"] = self.data + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PollQueryStatusResponse: + """Deserializes the PollQueryStatusResponse from a dictionary.""" + return cls(data=_repeated_dict(d, "data", PollQueryStatusResponseData)) + + +@dataclass +class PollQueryStatusResponseData: + status: QueryResponseStatus + + def as_dict(self) -> dict: + """Serializes the PollQueryStatusResponseData into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.status: + body["status"] = self.status.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PollQueryStatusResponseData into a shallow dictionary of its immediate attributes.""" + body = {} + if self.status: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PollQueryStatusResponseData: + """Deserializes the PollQueryStatusResponseData from a dictionary.""" + return cls(status=_from_dict(d, "status", QueryResponseStatus)) + + @dataclass class PublishedDashboard: display_name: Optional[str] = None @@ -1309,6 +1564,69 @@ def from_dict(cls, d: Dict[str, Any]) -> QueryAttachmentParameter: return cls(keyword=d.get("keyword", None), sql_type=d.get("sql_type", None), value=d.get("value", None)) +@dataclass +class QueryResponseStatus: + canceled: Optional[Empty] = None + + closed: Optional[Empty] = None + + pending: Optional[PendingStatus] = None + + statement_id: Optional[str] = None + """The statement id in format(01eef5da-c56e-1f36-bafa-21906587d6ba) The statement_id should be + identical to data_token in SuccessStatus and PendingStatus. 
This field is created for audit + logging purpose to record the statement_id of all QueryResponseStatus.""" + + success: Optional[SuccessStatus] = None + + def as_dict(self) -> dict: + """Serializes the QueryResponseStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.canceled: + body["canceled"] = self.canceled.as_dict() + if self.closed: + body["closed"] = self.closed.as_dict() + if self.pending: + body["pending"] = self.pending.as_dict() + if self.statement_id is not None: + body["statement_id"] = self.statement_id + if self.success: + body["success"] = self.success.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the QueryResponseStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.canceled: + body["canceled"] = self.canceled + if self.closed: + body["closed"] = self.closed + if self.pending: + body["pending"] = self.pending + if self.statement_id is not None: + body["statement_id"] = self.statement_id + if self.success: + body["success"] = self.success + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> QueryResponseStatus: + """Deserializes the QueryResponseStatus from a dictionary.""" + return cls( + canceled=_from_dict(d, "canceled", Empty), + closed=_from_dict(d, "closed", Empty), + pending=_from_dict(d, "pending", PendingStatus), + statement_id=d.get("statement_id", None), + success=_from_dict(d, "success", SuccessStatus), + ) + + +class ResponsePhase(Enum): + + RESPONSE_PHASE_THINKING = "RESPONSE_PHASE_THINKING" + RESPONSE_PHASE_VERIFYING = "RESPONSE_PHASE_VERIFYING" + + @dataclass class Result: is_truncated: Optional[bool] = None @@ -1632,6 +1950,39 @@ def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriberUser: return cls(user_id=d.get("user_id", None)) +@dataclass +class SuccessStatus: + data_token: str + """The token to poll for result asynchronously Example: + EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" + + truncated: Optional[bool] = None + """Whether the query result is truncated (either by byte limit or row limit)""" + + def as_dict(self) -> dict: + """Serializes the SuccessStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.data_token is not None: + body["data_token"] = self.data_token + if self.truncated is not None: + body["truncated"] = self.truncated + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SuccessStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.data_token is not None: + body["data_token"] = self.data_token + if self.truncated is not None: + body["truncated"] = self.truncated + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SuccessStatus: + """Deserializes the SuccessStatus from a dictionary.""" + return cls(data_token=d.get("data_token", None), truncated=d.get("truncated", None)) + + @dataclass class TextAttachment: content: Optional[str] = None @@ -1639,6 +1990,14 @@ class TextAttachment: id: Optional[str] = None + phase: Optional[ResponsePhase] = None + + purpose: Optional[TextAttachmentPurpose] = None + """Purpose/intent of this text attachment""" + + verification_metadata: Optional[VerificationMetadata] = None + """Metadata for verification phase attachments. 
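A hedged sketch of deserializing a Genie text attachment that carries the new verification fields; the payload values are illustrative, not real API output.

    # Sketch: round-trip a verification-phase text attachment.
    from databricks.sdk.service.dashboards import TextAttachment

    att = TextAttachment.from_dict({
        "content": "Verified the generated SQL against the trusted asset.",
        "phase": "RESPONSE_PHASE_VERIFYING",
        "verification_metadata": {"section": "VERIFICATION_SECTION_FINAL_DECISION", "index": 0},
    })
    print(att.phase, att.verification_metadata.section)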
Only set when phase = RESPONSE_PHASE_VERIFYING.""" + def as_dict(self) -> dict: """Serializes the TextAttachment into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1646,6 +2005,12 @@ def as_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.phase is not None: + body["phase"] = self.phase.value + if self.purpose is not None: + body["purpose"] = self.purpose.value + if self.verification_metadata: + body["verification_metadata"] = self.verification_metadata.as_dict() return body def as_shallow_dict(self) -> dict: @@ -1655,12 +2020,30 @@ def as_shallow_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.phase is not None: + body["phase"] = self.phase + if self.purpose is not None: + body["purpose"] = self.purpose + if self.verification_metadata: + body["verification_metadata"] = self.verification_metadata return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> TextAttachment: """Deserializes the TextAttachment from a dictionary.""" - return cls(content=d.get("content", None), id=d.get("id", None)) + return cls( + content=d.get("content", None), + id=d.get("id", None), + phase=_enum(d, "phase", ResponsePhase), + purpose=_enum(d, "purpose", TextAttachmentPurpose), + verification_metadata=_from_dict(d, "verification_metadata", VerificationMetadata), + ) + + +class TextAttachmentPurpose(Enum): + """Purpose/intent of a text attachment""" + + FOLLOW_UP_QUESTION = "FOLLOW_UP_QUESTION" @dataclass @@ -1699,6 +2082,49 @@ def from_dict(cls, d: Dict[str, Any]) -> UnpublishDashboardResponse: return cls() +@dataclass +class VerificationMetadata: + """Metadata for verification phase attachments""" + + index: Optional[int] = None + """Optional index to help order attachments within the same section""" + + section: Optional[VerificationSection] = None + + def as_dict(self) -> dict: + """Serializes the VerificationMetadata into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.index is not None: + body["index"] = self.index + if self.section is not None: + body["section"] = self.section.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the VerificationMetadata into a shallow dictionary of its immediate attributes.""" + body = {} + if self.index is not None: + body["index"] = self.index + if self.section is not None: + body["section"] = self.section + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> VerificationMetadata: + """Deserializes the VerificationMetadata from a dictionary.""" + return cls(index=d.get("index", None), section=_enum(d, "section", VerificationSection)) + + +class VerificationSection(Enum): + """Verification workflow section - indicates which stage of verification this attachment belongs to + These sections are used for grouping and ordering attachments in the frontend UI""" + + VERIFICATION_SECTION_FINAL_DECISION = "VERIFICATION_SECTION_FINAL_DECISION" + VERIFICATION_SECTION_PROPOSED_IMPROVEMENT = "VERIFICATION_SECTION_PROPOSED_IMPROVEMENT" + VERIFICATION_SECTION_SQL_EXAMPLES_VALIDATION = "VERIFICATION_SECTION_SQL_EXAMPLES_VALIDATION" + VERIFICATION_SECTION_VERIFICATION_QUERIES = "VERIFICATION_SECTION_VERIFICATION_QUERIES" + + class GenieAPI: """Genie provides a no-code experience for business users, powered by AI/BI. Analysts set up spaces that business users can use to ask questions using natural language. 
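A hedged sketch of the feedback call with the new optional comment parameter added in this patch; the space, conversation, and message IDs are hypothetical placeholders.

    # Sketch: send a rating plus a free-text comment for a Genie message.
    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.dashboards import GenieFeedbackRating

    w = WorkspaceClient()
    w.genie.send_message_feedback(
        space_id="01ef1234abcd",        # placeholder IDs
        conversation_id="conv-123",
        message_id="msg-456",
        rating=GenieFeedbackRating.POSITIVE,
        comment="Correct result, and the chart was helpful.",
    )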
Genie uses data registered to Unity @@ -1787,6 +2213,50 @@ def create_message_and_wait( timeout=timeout ) + def create_space( + self, + warehouse_id: str, + serialized_space: str, + *, + description: Optional[str] = None, + parent_path: Optional[str] = None, + title: Optional[str] = None, + ) -> GenieSpace: + """Creates a Genie space from a serialized payload. + + :param warehouse_id: str + Warehouse to associate with the new space + :param serialized_space: str + Serialized export model for the space contents + :param description: str (optional) + Optional description + :param parent_path: str (optional) + Parent folder path where the space will be registered + :param title: str (optional) + Optional title override + + :returns: :class:`GenieSpace` + """ + + body = {} + if description is not None: + body["description"] = description + if parent_path is not None: + body["parent_path"] = parent_path + if serialized_space is not None: + body["serialized_space"] = serialized_space + if title is not None: + body["title"] = title + if warehouse_id is not None: + body["warehouse_id"] = warehouse_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/genie/spaces", body=body, headers=headers) + return GenieSpace.from_dict(res) + def delete_conversation(self, space_id: str, conversation_id: str): """Delete a conversation. @@ -1883,6 +2353,75 @@ def execute_message_query( ) return GenieGetMessageQueryResultResponse.from_dict(res) + def generate_download_full_query_result( + self, space_id: str, conversation_id: str, message_id: str, attachment_id: str + ) -> GenieGenerateDownloadFullQueryResultResponse: + """Initiates a new SQL execution and returns a `download_id` that you can use to track the progress of + the download. The query result is stored in an external link and can be retrieved using the [Get + Download Full Query Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + See [Execute Statement](:method:statementexecution/executestatement) for more details. + + :param space_id: str + Genie space ID + :param conversation_id: str + Conversation ID + :param message_id: str + Message ID + :param attachment_id: str + Attachment ID + + :returns: :class:`GenieGenerateDownloadFullQueryResultResponse` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/genie/spaces/{space_id}/conversations/{conversation_id}/messages/{message_id}/attachments/{attachment_id}/downloads", + headers=headers, + ) + return GenieGenerateDownloadFullQueryResultResponse.from_dict(res) + + def get_download_full_query_result( + self, space_id: str, conversation_id: str, message_id: str, attachment_id: str, download_id: str + ) -> GenieGetDownloadFullQueryResultResponse: + """After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult) and + successfully receiving a `download_id`, use this API to poll the download progress. When the download + is complete, the API returns one or more external links to the query result files. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + You must not set an Authorization header in download requests. When using the `EXTERNAL_LINKS` + disposition, Databricks returns presigned URLs that grant temporary access to data. 
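A hedged sketch of the two-step download flow described here: request the download, then poll with the returned `download_id`. All IDs are placeholders, and real code would keep polling until the statement response reports completion.

    # Sketch: generate a full query result download, then poll for it.
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()
    gen = w.genie.generate_download_full_query_result(
        space_id="01ef1234abcd",
        conversation_id="conv-123",
        message_id="msg-456",
        attachment_id="att-789",
    )
    result = w.genie.get_download_full_query_result(
        space_id="01ef1234abcd",
        conversation_id="conv-123",
        message_id="msg-456",
        attachment_id="att-789",
        download_id=gen.download_id,
    )
    print(result.statement_response)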
See [Execute + Statement](:method:statementexecution/executestatement) for more details. + + :param space_id: str + Genie space ID + :param conversation_id: str + Conversation ID + :param message_id: str + Message ID + :param attachment_id: str + Attachment ID + :param download_id: str + Download ID. This ID is provided by the [Generate Download + endpoint](:method:genie/generateDownloadFullQueryResult) + + :returns: :class:`GenieGetDownloadFullQueryResultResponse` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", + f"/api/2.0/genie/spaces/{space_id}/conversations/{conversation_id}/messages/{message_id}/attachments/{attachment_id}/downloads/{download_id}", + headers=headers, + ) + return GenieGetDownloadFullQueryResultResponse.from_dict(res) + def get_message(self, space_id: str, conversation_id: str, message_id: str) -> GenieMessage: """Get message from conversation. @@ -2104,7 +2643,15 @@ def list_spaces( res = self._api.do("GET", "/api/2.0/genie/spaces", query=query, headers=headers) return GenieListSpacesResponse.from_dict(res) - def send_message_feedback(self, space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating): + def send_message_feedback( + self, + space_id: str, + conversation_id: str, + message_id: str, + rating: GenieFeedbackRating, + *, + comment: Optional[str] = None, + ): """Send feedback for a message. :param space_id: str @@ -2115,11 +2662,15 @@ def send_message_feedback(self, space_id: str, conversation_id: str, message_id: The ID associated with the message to provide feedback for. :param rating: :class:`GenieFeedbackRating` The rating (POSITIVE, NEGATIVE, or NONE). + :param comment: str (optional) + Optional text feedback that will be stored as a comment. """ body = {} + if comment is not None: + body["comment"] = comment if rating is not None: body["rating"] = rating.value headers = { @@ -2184,6 +2735,48 @@ def trash_space(self, space_id: str): self._api.do("DELETE", f"/api/2.0/genie/spaces/{space_id}", headers=headers) + def update_space( + self, + space_id: str, + *, + description: Optional[str] = None, + serialized_space: Optional[str] = None, + title: Optional[str] = None, + warehouse_id: Optional[str] = None, + ) -> GenieSpace: + """Updates a Genie space with a serialized payload. + + :param space_id: str + Genie space ID + :param description: str (optional) + Optional description + :param serialized_space: str (optional) + Serialized export model for the space contents (full replacement) + :param title: str (optional) + Optional title override + :param warehouse_id: str (optional) + Optional warehouse override + + :returns: :class:`GenieSpace` + """ + + body = {} + if description is not None: + body["description"] = description + if serialized_space is not None: + body["serialized_space"] = serialized_space + if title is not None: + body["title"] = title + if warehouse_id is not None: + body["warehouse_id"] = warehouse_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/genie/spaces/{space_id}", body=body, headers=headers) + return GenieSpace.from_dict(res) + class LakeviewAPI: """These APIs provide specific management operations for Lakeview dashboards. 
Generic resource management can
@@ -2664,6 +3257,21 @@ class LakeviewEmbeddedAPI:
     def __init__(self, api_client):
         self._api = api_client
 
+    def get_published_dashboard_embedded(self, dashboard_id: str):
+        """Get the current published dashboard within an embedded context.
+
+        :param dashboard_id: str
+          UUID identifying the published dashboard.
+
+
+        """
+
+        headers = {
+            "Accept": "application/json",
+        }
+
+        self._api.do("GET", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published/embedded", headers=headers)
+
     def get_published_dashboard_token_info(
         self, dashboard_id: str, *, external_value: Optional[str] = None, external_viewer_id: Optional[str] = None
     ) -> GetPublishedDashboardTokenInfoResponse:
@@ -2692,3 +3300,94 @@ def get_published_dashboard_token_info(
             "GET", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published/tokeninfo", query=query, headers=headers
         )
         return GetPublishedDashboardTokenInfoResponse.from_dict(res)
+
+
+class QueryExecutionAPI:
+    """Query execution APIs for AI / BI Dashboards"""
+
+    def __init__(self, api_client):
+        self._api = api_client
+
+    def cancel_published_query_execution(
+        self, dashboard_name: str, dashboard_revision_id: str, *, tokens: Optional[List[str]] = None
+    ) -> CancelQueryExecutionResponse:
+        """Cancel the results for a query for a published, embedded dashboard.
+
+        :param dashboard_name: str
+        :param dashboard_revision_id: str
+        :param tokens: List[str] (optional)
+          Example: EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ
+
+        :returns: :class:`CancelQueryExecutionResponse`
+        """
+
+        query = {}
+        if dashboard_name is not None:
+            query["dashboard_name"] = dashboard_name
+        if dashboard_revision_id is not None:
+            query["dashboard_revision_id"] = dashboard_revision_id
+        if tokens is not None:
+            query["tokens"] = [v for v in tokens]
+        headers = {
+            "Accept": "application/json",
+        }
+
+        res = self._api.do("DELETE", "/api/2.0/lakeview-query/query/published", query=query, headers=headers)
+        return CancelQueryExecutionResponse.from_dict(res)
+
+    def execute_published_dashboard_query(
+        self, dashboard_name: str, dashboard_revision_id: str, *, override_warehouse_id: Optional[str] = None
+    ):
+        """Execute a query for a published dashboard.
+
+        :param dashboard_name: str
+          Dashboard name and revision_id are required to retrieve PublishedDatasetDataModel which contains the
+          list of datasets, warehouse_id, and embedded_credentials
+        :param dashboard_revision_id: str
+        :param override_warehouse_id: str (optional)
+          A dashboard schedule can override the warehouse used as compute for processing the published
+          dashboard queries
+
+
+        """
+
+        body = {}
+        if dashboard_name is not None:
+            body["dashboard_name"] = dashboard_name
+        if dashboard_revision_id is not None:
+            body["dashboard_revision_id"] = dashboard_revision_id
+        if override_warehouse_id is not None:
+            body["override_warehouse_id"] = override_warehouse_id
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        self._api.do("POST", "/api/2.0/lakeview-query/query/published", body=body, headers=headers)
+
+    def poll_published_query_status(
+        self, dashboard_name: str, dashboard_revision_id: str, *, tokens: Optional[List[str]] = None
+    ) -> PollQueryStatusResponse:
+        """Poll the results for a query for a published, embedded dashboard.
+ + :param dashboard_name: str + :param dashboard_revision_id: str + :param tokens: List[str] (optional) + Example: EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + + :returns: :class:`PollQueryStatusResponse` + """ + + query = {} + if dashboard_name is not None: + query["dashboard_name"] = dashboard_name + if dashboard_revision_id is not None: + query["dashboard_revision_id"] = dashboard_revision_id + if tokens is not None: + query["tokens"] = [v for v in tokens] + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/lakeview-query/query/published", query=query, headers=headers) + return PollQueryStatusResponse.from_dict(res) diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index b0bbbd7cb..cad102d20 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -65,6 +65,12 @@ class DatabaseCatalog: create_database_if_not_exists: Optional[bool] = None + database_branch_id: Optional[str] = None + """The branch_id of the database branch associated with the catalog.""" + + database_project_id: Optional[str] = None + """The project_id of the database project associated with the catalog.""" + uid: Optional[str] = None def as_dict(self) -> dict: @@ -72,10 +78,14 @@ def as_dict(self) -> dict: body = {} if self.create_database_if_not_exists is not None: body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name if self.database_name is not None: body["database_name"] = self.database_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id if self.name is not None: body["name"] = self.name if self.uid is not None: @@ -87,10 +97,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.create_database_if_not_exists is not None: body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name if self.database_name is not None: body["database_name"] = self.database_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id if self.name is not None: body["name"] = self.name if self.uid is not None: @@ -102,8 +116,10 @@ def from_dict(cls, d: Dict[str, Any]) -> DatabaseCatalog: """Deserializes the DatabaseCatalog from a dictionary.""" return cls( create_database_if_not_exists=d.get("create_database_if_not_exists", None), + database_branch_id=d.get("database_branch_id", None), database_instance_name=d.get("database_instance_name", None), database_name=d.get("database_name", None), + database_project_id=d.get("database_project_id", None), name=d.get("name", None), uid=d.get("uid", None), ) @@ -613,6 +629,9 @@ class DatabaseTable: When creating a table in a standard catalog, this field is required. 
In this scenario, specifying this field will allow targeting an arbitrary postgres database.""" + table_serving_url: Optional[str] = None + """Data serving REST API URL for this table""" + def as_dict(self) -> dict: """Serializes the DatabaseTable into a dictionary suitable for use as a JSON request body.""" body = {} @@ -622,6 +641,8 @@ def as_dict(self) -> dict: body["logical_database_name"] = self.logical_database_name if self.name is not None: body["name"] = self.name + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url return body def as_shallow_dict(self) -> dict: @@ -633,6 +654,8 @@ def as_shallow_dict(self) -> dict: body["logical_database_name"] = self.logical_database_name if self.name is not None: body["name"] = self.name + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url return body @classmethod @@ -642,6 +665,7 @@ def from_dict(cls, d: Dict[str, Any]) -> DatabaseTable: database_instance_name=d.get("database_instance_name", None), logical_database_name=d.get("logical_database_name", None), name=d.get("name", None), + table_serving_url=d.get("table_serving_url", None), ) @@ -824,6 +848,9 @@ class NewPipelineSpec: """Custom fields that user can set for pipeline while creating SyncedDatabaseTable. Note that other fields of pipeline are still inferred by table def internally""" + budget_policy_id: Optional[str] = None + """Budget policy of this pipeline.""" + storage_catalog: Optional[str] = None """This field needs to be specified if the destination catalog is a managed postgres catalog. @@ -839,6 +866,8 @@ class NewPipelineSpec: def as_dict(self) -> dict: """Serializes the NewPipelineSpec into a dictionary suitable for use as a JSON request body.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.storage_catalog is not None: body["storage_catalog"] = self.storage_catalog if self.storage_schema is not None: @@ -848,6 +877,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the NewPipelineSpec into a shallow dictionary of its immediate attributes.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.storage_catalog is not None: body["storage_catalog"] = self.storage_catalog if self.storage_schema is not None: @@ -857,7 +888,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> NewPipelineSpec: """Deserializes the NewPipelineSpec from a dictionary.""" - return cls(storage_catalog=d.get("storage_catalog", None), storage_schema=d.get("storage_schema", None)) + return cls( + budget_policy_id=d.get("budget_policy_id", None), + storage_catalog=d.get("storage_catalog", None), + storage_schema=d.get("storage_schema", None), + ) class ProvisioningInfoState(Enum): @@ -958,6 +993,9 @@ class SyncedDatabaseTable: data_synchronization_status: Optional[SyncedTableStatus] = None """Synced Table data synchronization status""" + database_branch_id: Optional[str] = None + """The branch_id of the database branch associated with the table.""" + database_instance_name: Optional[str] = None """Name of the target database instance. This is required when creating synced database tables in standard catalogs. This is optional when creating synced database tables in registered catalogs. 
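The fields added above are ordinary optional dataclass attributes, so they round-trip through as_dict()/from_dict() exactly like the existing ones. A minimal sketch, assuming DatabaseCatalog and NewPipelineSpec are imported from this module and every identifier below is a placeholder:

    from databricks.sdk.service.database import DatabaseCatalog, NewPipelineSpec

    # Placeholder names and IDs for illustration only.
    catalog = DatabaseCatalog(
        name="my_catalog",
        database_instance_name="my-instance",
        database_name="my_database",
        database_branch_id="br-0123",    # new field
        database_project_id="prj-0123",  # new field
    )
    pipeline = NewPipelineSpec(budget_policy_id="policy-0123")  # new field

    # Only fields that are set end up in the serialized request body.
    assert "database_branch_id" in catalog.as_dict()
    assert "budget_policy_id" in pipeline.as_dict()
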
@@ -965,11 +1003,20 @@ class SyncedDatabaseTable: database instance name MUST match that of the registered catalog (or the request will be rejected).""" + database_project_id: Optional[str] = None + """The project_id of the database project associated with the table.""" + + effective_database_branch_id: Optional[str] = None + """The branch_id of the database branch associated with the table.""" + effective_database_instance_name: Optional[str] = None """The name of the database instance that this table is registered to. This field is always returned, and for tables inside database catalogs is inferred database instance associated with the catalog.""" + effective_database_project_id: Optional[str] = None + """The project_id of the database project associated with the table.""" + effective_logical_database_name: Optional[str] = None """The name of the logical database that this table is registered to.""" @@ -987,6 +1034,9 @@ class SyncedDatabaseTable: spec: Optional[SyncedTableSpec] = None + table_serving_url: Optional[str] = None + """Data serving REST API URL for this table""" + unity_catalog_provisioning_state: Optional[ProvisioningInfoState] = None """The provisioning state of the synced table entity in Unity Catalog. This is distinct from the state of the data synchronization pipeline (i.e. the table may be in "ACTIVE" but the pipeline @@ -997,10 +1047,18 @@ def as_dict(self) -> dict: body = {} if self.data_synchronization_status: body["data_synchronization_status"] = self.data_synchronization_status.as_dict() + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id + if self.effective_database_branch_id is not None: + body["effective_database_branch_id"] = self.effective_database_branch_id if self.effective_database_instance_name is not None: body["effective_database_instance_name"] = self.effective_database_instance_name + if self.effective_database_project_id is not None: + body["effective_database_project_id"] = self.effective_database_project_id if self.effective_logical_database_name is not None: body["effective_logical_database_name"] = self.effective_logical_database_name if self.logical_database_name is not None: @@ -1009,6 +1067,8 @@ def as_dict(self) -> dict: body["name"] = self.name if self.spec: body["spec"] = self.spec.as_dict() + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url if self.unity_catalog_provisioning_state is not None: body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state.value return body @@ -1018,10 +1078,18 @@ def as_shallow_dict(self) -> dict: body = {} if self.data_synchronization_status: body["data_synchronization_status"] = self.data_synchronization_status + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id + if self.effective_database_branch_id is not None: + body["effective_database_branch_id"] = self.effective_database_branch_id if self.effective_database_instance_name is not None: body["effective_database_instance_name"] = self.effective_database_instance_name + if 
self.effective_database_project_id is not None: + body["effective_database_project_id"] = self.effective_database_project_id if self.effective_logical_database_name is not None: body["effective_logical_database_name"] = self.effective_logical_database_name if self.logical_database_name is not None: @@ -1030,6 +1098,8 @@ def as_shallow_dict(self) -> dict: body["name"] = self.name if self.spec: body["spec"] = self.spec + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url if self.unity_catalog_provisioning_state is not None: body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state return body @@ -1039,12 +1109,17 @@ def from_dict(cls, d: Dict[str, Any]) -> SyncedDatabaseTable: """Deserializes the SyncedDatabaseTable from a dictionary.""" return cls( data_synchronization_status=_from_dict(d, "data_synchronization_status", SyncedTableStatus), + database_branch_id=d.get("database_branch_id", None), database_instance_name=d.get("database_instance_name", None), + database_project_id=d.get("database_project_id", None), + effective_database_branch_id=d.get("effective_database_branch_id", None), effective_database_instance_name=d.get("effective_database_instance_name", None), + effective_database_project_id=d.get("effective_database_project_id", None), effective_logical_database_name=d.get("effective_logical_database_name", None), logical_database_name=d.get("logical_database_name", None), name=d.get("name", None), spec=_from_dict(d, "spec", SyncedTableSpec), + table_serving_url=d.get("table_serving_url", None), unity_catalog_provisioning_state=_enum(d, "unity_catalog_provisioning_state", ProvisioningInfoState), ) @@ -1761,19 +1836,47 @@ def delete_database_table(self, name: str): self._api.do("DELETE", f"/api/2.0/database/tables/{name}", headers=headers) - def delete_synced_database_table(self, name: str): + def delete_synced_database_table(self, name: str, *, purge_data: Optional[bool] = None): """Delete a Synced Database Table. :param name: str + :param purge_data: bool (optional) + Optional. When set to true, the actual PostgreSQL table will be dropped from the database. + + + """ + query = {} + if purge_data is not None: + query["purge_data"] = purge_data + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/database/synced_tables/{name}", query=query, headers=headers) + + def failover_database_instance( + self, name: str, *, failover_target_database_instance_name: Optional[str] = None + ) -> DatabaseInstance: + """Failover the primary node of a Database Instance to a secondary. + :param name: str + Name of the instance to failover. + :param failover_target_database_instance_name: str (optional) + + :returns: :class:`DatabaseInstance` """ + body = {} + if failover_target_database_instance_name is not None: + body["failover_target_database_instance_name"] = failover_target_database_instance_name headers = { "Accept": "application/json", + "Content-Type": "application/json", } - self._api.do("DELETE", f"/api/2.0/database/synced_tables/{name}", headers=headers) + res = self._api.do("POST", f"/api/2.0/database/instances/{name}/failover", body=body, headers=headers) + return DatabaseInstance.from_dict(res) def find_database_instance_by_uid(self, *, uid: Optional[str] = None) -> DatabaseInstance: """Find a Database Instance by uid. 
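For orientation, a short usage sketch of the two operations added above. It assumes the Database service is exposed as `w.database` on an authenticated WorkspaceClient, and the instance and table names are placeholders:

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fail the primary node over to a named secondary (both names are placeholders).
    instance = w.database.failover_database_instance(
        name="my-instance",
        failover_target_database_instance_name="my-instance-replica",
    )

    # Delete a synced table and also drop the underlying PostgreSQL table.
    w.database.delete_synced_database_table("main.default.my_synced_table", purge_data=True)
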
@@ -2098,6 +2201,43 @@ def update_database_instance( res = self._api.do("PATCH", f"/api/2.0/database/instances/{name}", query=query, body=body, headers=headers) return DatabaseInstance.from_dict(res) + def update_database_instance_role( + self, + instance_name: str, + name: str, + database_instance_role: DatabaseInstanceRole, + *, + database_instance_name: Optional[str] = None, + ) -> DatabaseInstanceRole: + """Update a role for a Database Instance. + + :param instance_name: str + :param name: str + The name of the role. This is the unique identifier for the role in an instance. + :param database_instance_role: :class:`DatabaseInstanceRole` + :param database_instance_name: str (optional) + + :returns: :class:`DatabaseInstanceRole` + """ + + body = database_instance_role.as_dict() + query = {} + if database_instance_name is not None: + query["database_instance_name"] = database_instance_name + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/database/instances/{instance_name}/roles/{name}", + query=query, + body=body, + headers=headers, + ) + return DatabaseInstanceRole.from_dict(res) + def update_synced_database_table( self, name: str, synced_table: SyncedDatabaseTable, update_mask: str ) -> SyncedDatabaseTable: diff --git a/databricks/sdk/service/dataquality.py b/databricks/sdk/service/dataquality.py index d9a64e5df..9581bf82a 100755 --- a/databricks/sdk/service/dataquality.py +++ b/databricks/sdk/service/dataquality.py @@ -35,20 +35,54 @@ class AggregationGranularity(Enum): class AnomalyDetectionConfig: """Anomaly Detection Configurations.""" + anomaly_detection_workflow_id: Optional[int] = None + """The id of the workflow that detects the anomaly. This field will only be returned in the + Get/Update response, if the request comes from the workspace where this anomaly detection job is + created.""" + + job_type: Optional[AnomalyDetectionJobType] = None + """The type of the last run of the workflow.""" + + publish_health_indicator: Optional[bool] = None + """If the health indicator should be shown.""" + def as_dict(self) -> dict: """Serializes the AnomalyDetectionConfig into a dictionary suitable for use as a JSON request body.""" body = {} + if self.anomaly_detection_workflow_id is not None: + body["anomaly_detection_workflow_id"] = self.anomaly_detection_workflow_id + if self.job_type is not None: + body["job_type"] = self.job_type.value + if self.publish_health_indicator is not None: + body["publish_health_indicator"] = self.publish_health_indicator return body def as_shallow_dict(self) -> dict: """Serializes the AnomalyDetectionConfig into a shallow dictionary of its immediate attributes.""" body = {} + if self.anomaly_detection_workflow_id is not None: + body["anomaly_detection_workflow_id"] = self.anomaly_detection_workflow_id + if self.job_type is not None: + body["job_type"] = self.job_type + if self.publish_health_indicator is not None: + body["publish_health_indicator"] = self.publish_health_indicator return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> AnomalyDetectionConfig: """Deserializes the AnomalyDetectionConfig from a dictionary.""" - return cls() + return cls( + anomaly_detection_workflow_id=d.get("anomaly_detection_workflow_id", None), + job_type=_enum(d, "job_type", AnomalyDetectionJobType), + publish_health_indicator=d.get("publish_health_indicator", None), + ) + + +class AnomalyDetectionJobType(Enum): + """Anomaly Detection job type.""" + + 
ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN = "ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN" + ANOMALY_DETECTION_JOB_TYPE_NORMAL = "ANOMALY_DETECTION_JOB_TYPE_NORMAL" @dataclass @@ -430,6 +464,9 @@ class InferenceLogConfig: label_column: Optional[str] = None """Column for the label.""" + prediction_probability_column: Optional[str] = None + """Column for prediction probabilities""" + def as_dict(self) -> dict: """Serializes the InferenceLogConfig into a dictionary suitable for use as a JSON request body.""" body = {} @@ -441,6 +478,8 @@ def as_dict(self) -> dict: body["model_id_column"] = self.model_id_column if self.prediction_column is not None: body["prediction_column"] = self.prediction_column + if self.prediction_probability_column is not None: + body["prediction_probability_column"] = self.prediction_probability_column if self.problem_type is not None: body["problem_type"] = self.problem_type.value if self.timestamp_column is not None: @@ -458,6 +497,8 @@ def as_shallow_dict(self) -> dict: body["model_id_column"] = self.model_id_column if self.prediction_column is not None: body["prediction_column"] = self.prediction_column + if self.prediction_probability_column is not None: + body["prediction_probability_column"] = self.prediction_probability_column if self.problem_type is not None: body["problem_type"] = self.problem_type if self.timestamp_column is not None: @@ -472,6 +513,7 @@ def from_dict(cls, d: Dict[str, Any]) -> InferenceLogConfig: label_column=d.get("label_column", None), model_id_column=d.get("model_id_column", None), prediction_column=d.get("prediction_column", None), + prediction_probability_column=d.get("prediction_probability_column", None), problem_type=_enum(d, "problem_type", InferenceProblemType), timestamp_column=d.get("timestamp_column", None), ) diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index e84121f29..4c0d13ab6 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2525,8 +2525,9 @@ def list( start_index: Optional[int] = None, ) -> Iterator[AccountGroup]: """Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/databricks/sdk/service/iamv2.py b/databricks/sdk/service/iamv2.py index 0422f37f8..e66d5cd3e 100755 --- a/databricks/sdk/service/iamv2.py +++ b/databricks/sdk/service/iamv2.py @@ -7,7 +7,8 @@ from enum import Enum from typing import Any, Dict, List, Optional -from databricks.sdk.service._internal import _enum, _from_dict, _repeated_enum +from databricks.sdk.service._internal import (_enum, _from_dict, + _repeated_dict, _repeated_enum) _LOG = logging.getLogger("databricks.sdk") @@ -68,6 +69,148 @@ def from_dict(cls, d: Dict[str, Any]) -> Group: ) +@dataclass +class ListGroupsResponse: + """TODO: Write description later when this method is implemented""" + + groups: Optional[List[Group]] = None + + next_page_token: Optional[str] = None + """A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, + there are no subsequent pages.""" + + def as_dict(self) -> dict: + """Serializes the ListGroupsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.groups: + body["groups"] = [v.as_dict() for v in self.groups] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListGroupsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.groups: + body["groups"] = self.groups + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListGroupsResponse: + """Deserializes the ListGroupsResponse from a dictionary.""" + return cls(groups=_repeated_dict(d, "groups", Group), next_page_token=d.get("next_page_token", None)) + + +@dataclass +class ListServicePrincipalsResponse: + """TODO: Write description later when this method is implemented""" + + next_page_token: Optional[str] = None + """A token, which can be sent as page_token to retrieve the next page. If this field is omitted, + there are no subsequent pages.""" + + service_principals: Optional[List[ServicePrincipal]] = None + + def as_dict(self) -> dict: + """Serializes the ListServicePrincipalsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.service_principals: + body["service_principals"] = [v.as_dict() for v in self.service_principals] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListServicePrincipalsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.service_principals: + body["service_principals"] = self.service_principals + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListServicePrincipalsResponse: + """Deserializes the ListServicePrincipalsResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + service_principals=_repeated_dict(d, "service_principals", ServicePrincipal), + ) + + +@dataclass +class ListUsersResponse: + """TODO: Write description later when this method is implemented""" + + next_page_token: Optional[str] = None + """A token, which can be sent as page_token to retrieve the next page. 
If this field is omitted, + there are no subsequent pages.""" + + users: Optional[List[User]] = None + + def as_dict(self) -> dict: + """Serializes the ListUsersResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.users: + body["users"] = [v.as_dict() for v in self.users] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListUsersResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.users: + body["users"] = self.users + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListUsersResponse: + """Deserializes the ListUsersResponse from a dictionary.""" + return cls(next_page_token=d.get("next_page_token", None), users=_repeated_dict(d, "users", User)) + + +@dataclass +class ListWorkspaceAccessDetailsResponse: + """TODO: Write description later when this method is implemented""" + + next_page_token: Optional[str] = None + """A token, which can be sent as page_token to retrieve the next page. If this field is omitted, + there are no subsequent pages.""" + + workspace_access_details: Optional[List[WorkspaceAccessDetail]] = None + + def as_dict(self) -> dict: + """Serializes the ListWorkspaceAccessDetailsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.workspace_access_details: + body["workspace_access_details"] = [v.as_dict() for v in self.workspace_access_details] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListWorkspaceAccessDetailsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.workspace_access_details: + body["workspace_access_details"] = self.workspace_access_details + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListWorkspaceAccessDetailsResponse: + """Deserializes the ListWorkspaceAccessDetailsResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + workspace_access_details=_repeated_dict(d, "workspace_access_details", WorkspaceAccessDetail), + ) + + class PrincipalType(Enum): """The type of the principal (user/sp/group).""" @@ -427,6 +570,221 @@ class AccountIamV2API: def __init__(self, api_client): self._api = api_client + def create_group(self, group: Group) -> Group: + """TODO: Write description later when this method is implemented + + :param group: :class:`Group` + Required. Group to be created in + + :returns: :class:`Group` + """ + + body = group.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", f"/api/2.0/identity/accounts/{self._api.account_id}/groups", body=body, headers=headers + ) + return Group.from_dict(res) + + def create_service_principal(self, service_principal: ServicePrincipal) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param service_principal: :class:`ServicePrincipal` + Required. 
Service principal to be created in + + :returns: :class:`ServicePrincipal` + """ + + body = service_principal.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals", body=body, headers=headers + ) + return ServicePrincipal.from_dict(res) + + def create_user(self, user: User) -> User: + """TODO: Write description later when this method is implemented + + :param user: :class:`User` + Required. User to be created in + + :returns: :class:`User` + """ + + body = user.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", f"/api/2.0/identity/accounts/{self._api.account_id}/users", body=body, headers=headers + ) + return User.from_dict(res) + + def create_workspace_access_detail( + self, parent: str, workspace_access_detail: WorkspaceAccessDetail + ) -> WorkspaceAccessDetail: + """TODO: Write description later when this method is implemented + + :param parent: str + Required. The parent path for workspace access detail. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. Workspace access detail to be created in . + + :returns: :class:`WorkspaceAccessDetail` + """ + + body = workspace_access_detail.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "POST", + f"/api/2.0/identity/accounts/{self._api.account_id}/workspaces/{parent}/workspaceAccessDetails", + body=body, + headers=headers, + ) + return WorkspaceAccessDetail.from_dict(res) + + def delete_group(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", f"/api/2.0/identity/accounts/{self._api.account_id}/groups/{internal_id}", headers=headers + ) + + def delete_service_principal(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", + f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals/{internal_id}", + headers=headers, + ) + + def delete_user(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", f"/api/2.0/identity/accounts/{self._api.account_id}/users/{internal_id}", headers=headers + ) + + def delete_workspace_access_detail(self, workspace_id: int, principal_id: int): + """TODO: Write description later when this method is implemented + + :param workspace_id: int + The workspace ID where the principal has access. + :param principal_id: int + Required. ID of the principal in Databricks to delete workspace access for. 
+ + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", + f"/api/2.0/identity/accounts/{self._api.account_id}/workspaces/{workspace_id}/workspaceAccessDetails/{principal_id}", + headers=headers, + ) + + def get_group(self, internal_id: int) -> Group: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + :returns: :class:`Group` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/groups/{internal_id}", headers=headers + ) + return Group.from_dict(res) + + def get_service_principal(self, internal_id: int) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + :returns: :class:`ServicePrincipal` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals/{internal_id}", headers=headers + ) + return ServicePrincipal.from_dict(res) + + def get_user(self, internal_id: int) -> User: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + :returns: :class:`User` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/users/{internal_id}", headers=headers + ) + return User.from_dict(res) + def get_workspace_access_detail( self, workspace_id: int, principal_id: int, *, view: Optional[WorkspaceAccessDetailView] = None ) -> WorkspaceAccessDetail: @@ -462,6 +820,119 @@ def get_workspace_access_detail( ) return WorkspaceAccessDetail.from_dict(res) + def list_groups(self, *, page_size: Optional[int] = None, page_token: Optional[str] = None) -> ListGroupsResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of groups to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListGroups call. Provide this to retrieve the subsequent + page. + + :returns: :class:`ListGroupsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/groups", query=query, headers=headers + ) + return ListGroupsResponse.from_dict(res) + + def list_service_principals( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListServicePrincipalsResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of service principals to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListServicePrincipals call. Provide this to retrieve the + subsequent page. 
+ + :returns: :class:`ListServicePrincipalsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals", query=query, headers=headers + ) + return ListServicePrincipalsResponse.from_dict(res) + + def list_users(self, *, page_size: Optional[int] = None, page_token: Optional[str] = None) -> ListUsersResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of users to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListUsers call. Provide this to retrieve the subsequent page. + + :returns: :class:`ListUsersResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/identity/accounts/{self._api.account_id}/users", query=query, headers=headers + ) + return ListUsersResponse.from_dict(res) + + def list_workspace_access_details( + self, workspace_id: int, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListWorkspaceAccessDetailsResponse: + """TODO: Write description later when this method is implemented + + :param workspace_id: int + The workspace ID for which the workspace access details are being fetched. + :param page_size: int (optional) + The maximum number of workspace access details to return. The service may return fewer than this + value. + :param page_token: str (optional) + A page token, received from a previous ListWorkspaceAccessDetails call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListWorkspaceAccessDetailsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", + f"/api/2.0/identity/accounts/{self._api.account_id}/workspaces/{workspace_id}/workspaceAccessDetails", + query=query, + headers=headers, + ) + return ListWorkspaceAccessDetailsResponse.from_dict(res) + def resolve_group(self, external_id: str) -> ResolveGroupResponse: """Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it will be created in the account. If the customer is not onboarded onto Automatic Identity Management @@ -543,12 +1014,324 @@ def resolve_user(self, external_id: str) -> ResolveUserResponse: ) return ResolveUserResponse.from_dict(res) + def update_group(self, internal_id: int, group: Group, update_mask: str) -> Group: + """TODO: Write description later when this method is implemented -class WorkspaceIamV2API: - """These APIs are used to manage identities and the workspace access of these identities in .""" + :param internal_id: int + Required. Internal ID of the group in Databricks. + :param group: :class:`Group` + Required. Group to be updated in + :param update_mask: str + Optional. The list of fields to update. 
- def __init__(self, api_client): - self._api = api_client + :returns: :class:`Group` + """ + + body = group.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/identity/accounts/{self._api.account_id}/groups/{internal_id}", + query=query, + body=body, + headers=headers, + ) + return Group.from_dict(res) + + def update_service_principal( + self, internal_id: int, service_principal: ServicePrincipal, update_mask: str + ) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + :param service_principal: :class:`ServicePrincipal` + Required. Service Principal to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`ServicePrincipal` + """ + + body = service_principal.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/identity/accounts/{self._api.account_id}/servicePrincipals/{internal_id}", + query=query, + body=body, + headers=headers, + ) + return ServicePrincipal.from_dict(res) + + def update_user(self, internal_id: int, user: User, update_mask: str) -> User: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + :param user: :class:`User` + Required. User to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`User` + """ + + body = user.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/identity/accounts/{self._api.account_id}/users/{internal_id}", + query=query, + body=body, + headers=headers, + ) + return User.from_dict(res) + + def update_workspace_access_detail( + self, workspace_id: int, principal_id: int, workspace_access_detail: WorkspaceAccessDetail, update_mask: str + ) -> WorkspaceAccessDetail: + """TODO: Write description later when this method is implemented + + :param workspace_id: int + Required. The workspace ID for which the workspace access detail is being updated. + :param principal_id: int + Required. ID of the principal in Databricks. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. Workspace access detail to be updated in + :param update_mask: str + Optional. The list of fields to update. 
+ + :returns: :class:`WorkspaceAccessDetail` + """ + + body = workspace_access_detail.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/identity/accounts/{self._api.account_id}/workspaces/{workspace_id}/workspaceAccessDetails/{principal_id}", + query=query, + body=body, + headers=headers, + ) + return WorkspaceAccessDetail.from_dict(res) + + +class WorkspaceIamV2API: + """These APIs are used to manage identities and the workspace access of these identities in .""" + + def __init__(self, api_client): + self._api = api_client + + def create_group_proxy(self, group: Group) -> Group: + """TODO: Write description later when this method is implemented + + :param group: :class:`Group` + Required. Group to be created in + + :returns: :class:`Group` + """ + + body = group.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/groups", body=body, headers=headers) + return Group.from_dict(res) + + def create_service_principal_proxy(self, service_principal: ServicePrincipal) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param service_principal: :class:`ServicePrincipal` + Required. Service principal to be created in + + :returns: :class:`ServicePrincipal` + """ + + body = service_principal.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/servicePrincipals", body=body, headers=headers) + return ServicePrincipal.from_dict(res) + + def create_user_proxy(self, user: User) -> User: + """TODO: Write description later when this method is implemented + + :param user: :class:`User` + Required. User to be created in + + :returns: :class:`User` + """ + + body = user.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/users", body=body, headers=headers) + return User.from_dict(res) + + def create_workspace_access_detail_local( + self, workspace_access_detail: WorkspaceAccessDetail + ) -> WorkspaceAccessDetail: + """TODO: Write description later when this method is implemented + + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. Workspace access detail to be created in . + + :returns: :class:`WorkspaceAccessDetail` + """ + + body = workspace_access_detail.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/identity/workspaceAccessDetails", body=body, headers=headers) + return WorkspaceAccessDetail.from_dict(res) + + def delete_group_proxy(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/identity/groups/{internal_id}", headers=headers) + + def delete_service_principal_proxy(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. 
+ + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/identity/servicePrincipals/{internal_id}", headers=headers) + + def delete_user_proxy(self, internal_id: int): + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/identity/users/{internal_id}", headers=headers) + + def delete_workspace_access_detail_local(self, principal_id: int): + """TODO: Write description later when this method is implemented + + :param principal_id: int + Required. ID of the principal in Databricks. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/identity/workspaceAccessDetails/{principal_id}", headers=headers) + + def get_group_proxy(self, internal_id: int) -> Group: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + :returns: :class:`Group` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/identity/groups/{internal_id}", headers=headers) + return Group.from_dict(res) + + def get_service_principal_proxy(self, internal_id: int) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + :returns: :class:`ServicePrincipal` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/identity/servicePrincipals/{internal_id}", headers=headers) + return ServicePrincipal.from_dict(res) + + def get_user_proxy(self, internal_id: int) -> User: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + :returns: :class:`User` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/identity/users/{internal_id}", headers=headers) + return User.from_dict(res) def get_workspace_access_detail_local( self, principal_id: int, *, view: Optional[WorkspaceAccessDetailView] = None @@ -580,6 +1363,110 @@ def get_workspace_access_detail_local( ) return WorkspaceAccessDetail.from_dict(res) + def list_groups_proxy( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListGroupsResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of groups to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListGroups call. Provide this to retrieve the subsequent + page. + + :returns: :class:`ListGroupsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/identity/groups", query=query, headers=headers) + return ListGroupsResponse.from_dict(res) + + def list_service_principals_proxy( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListServicePrincipalsResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of SPs to return. 
The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListServicePrincipals call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListServicePrincipalsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/identity/servicePrincipals", query=query, headers=headers) + return ListServicePrincipalsResponse.from_dict(res) + + def list_users_proxy( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListUsersResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of users to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListUsers call. Provide this to retrieve the subsequent page. + + :returns: :class:`ListUsersResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/identity/users", query=query, headers=headers) + return ListUsersResponse.from_dict(res) + + def list_workspace_access_details_local( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> ListWorkspaceAccessDetailsResponse: + """TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of workspace access details to return. The service may return fewer than this + value. + :param page_token: str (optional) + A page token, received from a previous ListWorkspaceAccessDetails call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListWorkspaceAccessDetailsResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/identity/workspaceAccessDetails", query=query, headers=headers) + return ListWorkspaceAccessDetailsResponse.from_dict(res) + def resolve_group_proxy(self, external_id: str) -> ResolveGroupResponse: """Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it will be created in the account. If the customer is not onboarded onto Automatic Identity Management @@ -647,3 +1534,111 @@ def resolve_user_proxy(self, external_id: str) -> ResolveUserResponse: res = self._api.do("POST", "/api/2.0/identity/users/resolveByExternalId", body=body, headers=headers) return ResolveUserResponse.from_dict(res) + + def update_group_proxy(self, internal_id: int, group: Group, update_mask: str) -> Group: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + :param group: :class:`Group` + Required. Group to be updated in + :param update_mask: str + Optional. The list of fields to update. 
+ + :returns: :class:`Group` + """ + + body = group.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/identity/groups/{internal_id}", query=query, body=body, headers=headers) + return Group.from_dict(res) + + def update_service_principal_proxy( + self, internal_id: int, service_principal: ServicePrincipal, update_mask: str + ) -> ServicePrincipal: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + :param service_principal: :class:`ServicePrincipal` + Required. Service principal to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`ServicePrincipal` + """ + + body = service_principal.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/2.0/identity/servicePrincipals/{internal_id}", query=query, body=body, headers=headers + ) + return ServicePrincipal.from_dict(res) + + def update_user_proxy(self, internal_id: int, user: User, update_mask: str) -> User: + """TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + :param user: :class:`User` + Required. User to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`User` + """ + + body = user.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/identity/users/{internal_id}", query=query, body=body, headers=headers) + return User.from_dict(res) + + def update_workspace_access_detail_local( + self, principal_id: int, workspace_access_detail: WorkspaceAccessDetail, update_mask: str + ) -> WorkspaceAccessDetail: + """TODO: Write description later when this method is implemented + + :param principal_id: int + Required. ID of the principal in Databricks. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. WorkspaceAccessDetail to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`WorkspaceAccessDetail` + """ + + body = workspace_access_detail.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/2.0/identity/workspaceAccessDetails/{principal_id}", query=query, body=body, headers=headers + ) + return WorkspaceAccessDetail.from_dict(res) diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 1ca8e631c..c239acc6e 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -54,6 +54,10 @@ class BaseJob: job_id: Optional[int] = None """The canonical identifier for this job.""" + path: Optional[str] = None + """Path of the job object in workspace file tree, including file extension. If absent, the job + doesn't have a workspace object. 
Example: /Workspace/user@example.com/my_project/my_job.job.json""" + settings: Optional[JobSettings] = None """Settings for this job and all of its runs. These settings can be updated using the `resetJob` method.""" @@ -76,6 +80,8 @@ def as_dict(self) -> dict: body["has_more"] = self.has_more if self.job_id is not None: body["job_id"] = self.job_id + if self.path is not None: + body["path"] = self.path if self.settings: body["settings"] = self.settings.as_dict() if self.trigger_state: @@ -97,6 +103,8 @@ def as_shallow_dict(self) -> dict: body["has_more"] = self.has_more if self.job_id is not None: body["job_id"] = self.job_id + if self.path is not None: + body["path"] = self.path if self.settings: body["settings"] = self.settings if self.trigger_state: @@ -113,6 +121,7 @@ def from_dict(cls, d: Dict[str, Any]) -> BaseJob: effective_usage_policy_id=d.get("effective_usage_policy_id", None), has_more=d.get("has_more", None), job_id=d.get("job_id", None), + path=d.get("path", None), settings=_from_dict(d, "settings", JobSettings), trigger_state=_from_dict(d, "trigger_state", TriggerStateProto), ) @@ -589,6 +598,9 @@ class CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput: output_schema_info: Optional[OutputSchemaInfo] = None """Information on how to access the output schema for the clean room run""" + shared_output_schema_info: Optional[OutputSchemaInfo] = None + """Information on how to access the shared output schema for the clean room run""" + def as_dict(self) -> dict: """Serializes the CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput into a dictionary suitable for use as a JSON request body.""" body = {} @@ -598,6 +610,8 @@ def as_dict(self) -> dict: body["notebook_output"] = self.notebook_output.as_dict() if self.output_schema_info: body["output_schema_info"] = self.output_schema_info.as_dict() + if self.shared_output_schema_info: + body["shared_output_schema_info"] = self.shared_output_schema_info.as_dict() return body def as_shallow_dict(self) -> dict: @@ -609,6 +623,8 @@ def as_shallow_dict(self) -> dict: body["notebook_output"] = self.notebook_output if self.output_schema_info: body["output_schema_info"] = self.output_schema_info + if self.shared_output_schema_info: + body["shared_output_schema_info"] = self.shared_output_schema_info return body @classmethod @@ -618,6 +634,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomsNotebookTaskCleanRoomsNoteboo clean_room_job_run_state=_from_dict(d, "clean_room_job_run_state", CleanRoomTaskRunState), notebook_output=_from_dict(d, "notebook_output", NotebookOutput), output_schema_info=_from_dict(d, "output_schema_info", OutputSchemaInfo), + shared_output_schema_info=_from_dict(d, "shared_output_schema_info", OutputSchemaInfo), ) @@ -2182,6 +2199,10 @@ class Job: next_page_token: Optional[str] = None """A token that can be used to list the next page of array properties.""" + path: Optional[str] = None + """Path of the job object in workspace file tree, including file extension. If absent, the job + doesn't have a workspace object. Example: /Workspace/user@example.com/my_project/my_job.job.json""" + run_as_user_name: Optional[str] = None """The email of an active workspace user or the application ID of a service principal that the job runs as. 
This value can be changed by setting the `run_as` field when creating or updating a @@ -2215,6 +2236,8 @@ def as_dict(self) -> dict: body["job_id"] = self.job_id if self.next_page_token is not None: body["next_page_token"] = self.next_page_token + if self.path is not None: + body["path"] = self.path if self.run_as_user_name is not None: body["run_as_user_name"] = self.run_as_user_name if self.settings: @@ -2240,6 +2263,8 @@ def as_shallow_dict(self) -> dict: body["job_id"] = self.job_id if self.next_page_token is not None: body["next_page_token"] = self.next_page_token + if self.path is not None: + body["path"] = self.path if self.run_as_user_name is not None: body["run_as_user_name"] = self.run_as_user_name if self.settings: @@ -2259,6 +2284,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Job: has_more=d.get("has_more", None), job_id=d.get("job_id", None), next_page_token=d.get("next_page_token", None), + path=d.get("path", None), run_as_user_name=d.get("run_as_user_name", None), settings=_from_dict(d, "settings", JobSettings), trigger_state=_from_dict(d, "trigger_state", TriggerStateProto), @@ -2915,10 +2941,10 @@ class JobSettings: environments: Optional[List[JobEnvironment]] = None """A list of task execution environment specifications that can be referenced by serverless tasks - of this job. An environment is required to be present for serverless tasks. For serverless - notebook tasks, the environment is accessible in the notebook environment panel. For other - serverless tasks, the task environment is required to be specified using environment_key in the - task settings.""" + of this job. For serverless notebook tasks, if the environment_key is not specified, the + notebook environment will be used if present. If a jobs environment is specified, it will + override the notebook environment. For other serverless tasks, the task environment is required + to be specified using environment_key in the task settings.""" format: Optional[Format] = None """Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. @@ -2961,6 +2987,10 @@ class JobSettings: parameters: Optional[List[JobParameterDefinition]] = None """Job-level parameter definitions""" + parent_path: Optional[str] = None + """Path of the job parent folder in workspace file tree. If absent, the job doesn't have a + workspace object.""" + performance_target: Optional[PerformanceTarget] = None """The performance mode on a serverless job. This field determines the level of compute performance or cost-efficiency for the run. 
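The hunk above adds an optional `parent_path` to job settings and the corresponding `path` field on returned jobs. As a hedged illustration only (not part of the generated patch): the sketch below assumes a standard `WorkspaceClient`, a hypothetical workspace folder, notebook path, and job name, and simply forwards `parent_path` on create so the job object is stored under that folder.

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    # Hypothetical names and paths, for illustration only.
    created = w.jobs.create(
        name="example-job",
        parent_path="/Workspace/Users/someone@example.com/my_project",  # new optional field in this change
        tasks=[
            jobs.Task(
                task_key="main",
                notebook_task=jobs.NotebookTask(
                    notebook_path="/Workspace/Users/someone@example.com/my_project/etl"
                ),
            )
        ],
    )
    print(created.job_id)  # the created job's `path` should then point under parent_path
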
@@ -3042,6 +3072,8 @@ def as_dict(self) -> dict: body["notification_settings"] = self.notification_settings.as_dict() if self.parameters: body["parameters"] = [v.as_dict() for v in self.parameters] + if self.parent_path is not None: + body["parent_path"] = self.parent_path if self.performance_target is not None: body["performance_target"] = self.performance_target.value if self.queue: @@ -3097,6 +3129,8 @@ def as_shallow_dict(self) -> dict: body["notification_settings"] = self.notification_settings if self.parameters: body["parameters"] = self.parameters + if self.parent_path is not None: + body["parent_path"] = self.parent_path if self.performance_target is not None: body["performance_target"] = self.performance_target if self.queue: @@ -3138,6 +3172,7 @@ def from_dict(cls, d: Dict[str, Any]) -> JobSettings: name=d.get("name", None), notification_settings=_from_dict(d, "notification_settings", JobNotificationSettings), parameters=_repeated_dict(d, "parameters", JobParameterDefinition), + parent_path=d.get("parent_path", None), performance_target=_enum(d, "performance_target", PerformanceTarget), queue=_from_dict(d, "queue", QueueSettings), run_as=_from_dict(d, "run_as", JobRunAs), @@ -3460,6 +3495,78 @@ def from_dict(cls, d: Dict[str, Any]) -> ListRunsResponse: ) +@dataclass +class ModelTriggerConfiguration: + condition: ModelTriggerConfigurationCondition + """The condition based on which to trigger a job run.""" + + aliases: Optional[List[str]] = None + """Aliases of the model versions to monitor. Can only be used in conjunction with condition + MODEL_ALIAS_SET.""" + + min_time_between_triggers_seconds: Optional[int] = None + """If set, the trigger starts a run only after the specified amount of time has passed since the + last time the trigger fired. The minimum allowed value is 60 seconds.""" + + securable_name: Optional[str] = None + """Name of the securable to monitor ("mycatalog.myschema.mymodel" in the case of model-level + triggers, "mycatalog.myschema" in the case of schema-level triggers) or empty in the case of + metastore-level triggers.""" + + wait_after_last_change_seconds: Optional[int] = None + """If set, the trigger starts a run only after no model updates have occurred for the specified + time and can be used to wait for a series of model updates before triggering a run. 
The minimum + allowed value is 60 seconds.""" + + def as_dict(self) -> dict: + """Serializes the ModelTriggerConfiguration into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aliases: + body["aliases"] = [v for v in self.aliases] + if self.condition is not None: + body["condition"] = self.condition.value + if self.min_time_between_triggers_seconds is not None: + body["min_time_between_triggers_seconds"] = self.min_time_between_triggers_seconds + if self.securable_name is not None: + body["securable_name"] = self.securable_name + if self.wait_after_last_change_seconds is not None: + body["wait_after_last_change_seconds"] = self.wait_after_last_change_seconds + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ModelTriggerConfiguration into a shallow dictionary of its immediate attributes.""" + body = {} + if self.aliases: + body["aliases"] = self.aliases + if self.condition is not None: + body["condition"] = self.condition + if self.min_time_between_triggers_seconds is not None: + body["min_time_between_triggers_seconds"] = self.min_time_between_triggers_seconds + if self.securable_name is not None: + body["securable_name"] = self.securable_name + if self.wait_after_last_change_seconds is not None: + body["wait_after_last_change_seconds"] = self.wait_after_last_change_seconds + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ModelTriggerConfiguration: + """Deserializes the ModelTriggerConfiguration from a dictionary.""" + return cls( + aliases=d.get("aliases", None), + condition=_enum(d, "condition", ModelTriggerConfigurationCondition), + min_time_between_triggers_seconds=d.get("min_time_between_triggers_seconds", None), + securable_name=d.get("securable_name", None), + wait_after_last_change_seconds=d.get("wait_after_last_change_seconds", None), + ) + + +class ModelTriggerConfigurationCondition(Enum): + + MODEL_ALIAS_SET = "MODEL_ALIAS_SET" + MODEL_CREATED = "MODEL_CREATED" + MODEL_VERSION_READY = "MODEL_VERSION_READY" + + @dataclass class NotebookOutput: result: Optional[str] = None @@ -5624,6 +5731,10 @@ class RunTask: description: Optional[str] = None """An optional description for this task.""" + disabled: Optional[bool] = None + """An optional flag to disable the task. If set to true, the task will not run even if it is part + of a job.""" + effective_performance_target: Optional[PerformanceTarget] = None """The actual performance target used by the serverless run during execution. 
This can differ from the client-set performance target on the request depending on whether the performance mode is @@ -5786,6 +5897,8 @@ def as_dict(self) -> dict: body["depends_on"] = [v.as_dict() for v in self.depends_on] if self.description is not None: body["description"] = self.description + if self.disabled is not None: + body["disabled"] = self.disabled if self.effective_performance_target is not None: body["effective_performance_target"] = self.effective_performance_target.value if self.email_notifications: @@ -5883,6 +5996,8 @@ def as_shallow_dict(self) -> dict: body["depends_on"] = self.depends_on if self.description is not None: body["description"] = self.description + if self.disabled is not None: + body["disabled"] = self.disabled if self.effective_performance_target is not None: body["effective_performance_target"] = self.effective_performance_target if self.email_notifications: @@ -5970,6 +6085,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RunTask: dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), + disabled=d.get("disabled", None), effective_performance_target=_enum(d, "effective_performance_target", PerformanceTarget), email_notifications=_from_dict(d, "email_notifications", JobEmailNotifications), end_time=d.get("end_time", None), @@ -6854,6 +6970,10 @@ class SubmitTask: description: Optional[str] = None """An optional description for this task.""" + disabled: Optional[bool] = None + """An optional flag to disable the task. If set to true, the task will not run even if it is part + of a job.""" + email_notifications: Optional[JobEmailNotifications] = None """An optional set of email addresses notified when the task run begins or completes. 
The default behavior is to not send any emails.""" @@ -6949,6 +7069,8 @@ def as_dict(self) -> dict: body["depends_on"] = [v.as_dict() for v in self.depends_on] if self.description is not None: body["description"] = self.description + if self.disabled is not None: + body["disabled"] = self.disabled if self.email_notifications: body["email_notifications"] = self.email_notifications.as_dict() if self.environment_key is not None: @@ -7014,6 +7136,8 @@ def as_shallow_dict(self) -> dict: body["depends_on"] = self.depends_on if self.description is not None: body["description"] = self.description + if self.disabled is not None: + body["disabled"] = self.disabled if self.email_notifications: body["email_notifications"] = self.email_notifications if self.environment_key is not None: @@ -7072,6 +7196,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SubmitTask: dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), + disabled=d.get("disabled", None), email_notifications=_from_dict(d, "email_notifications", JobEmailNotifications), environment_key=d.get("environment_key", None), existing_cluster_id=d.get("existing_cluster_id", None), @@ -7963,6 +8088,8 @@ class TriggerSettings: file_arrival: Optional[FileArrivalTriggerConfiguration] = None """File arrival trigger settings.""" + model: Optional[ModelTriggerConfiguration] = None + pause_status: Optional[PauseStatus] = None """Whether this trigger is paused or not.""" @@ -7976,6 +8103,8 @@ def as_dict(self) -> dict: body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival.as_dict() + if self.model: + body["model"] = self.model.as_dict() if self.pause_status is not None: body["pause_status"] = self.pause_status.value if self.periodic: @@ -7989,6 +8118,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival + if self.model: + body["model"] = self.model if self.pause_status is not None: body["pause_status"] = self.pause_status if self.periodic: @@ -8002,6 +8133,7 @@ def from_dict(cls, d: Dict[str, Any]) -> TriggerSettings: """Deserializes the TriggerSettings from a dictionary.""" return cls( file_arrival=_from_dict(d, "file_arrival", FileArrivalTriggerConfiguration), + model=_from_dict(d, "model", ModelTriggerConfiguration), pause_status=_enum(d, "pause_status", PauseStatus), periodic=_from_dict(d, "periodic", PeriodicTriggerConfiguration), table_update=_from_dict(d, "table_update", TableUpdateTriggerConfiguration), @@ -8246,7 +8378,7 @@ class JobsAPI: scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. 
Use the [Secrets CLI] to manage secrets in @@ -8361,6 +8493,7 @@ def create( name: Optional[str] = None, notification_settings: Optional[JobNotificationSettings] = None, parameters: Optional[List[JobParameterDefinition]] = None, + parent_path: Optional[str] = None, performance_target: Optional[PerformanceTarget] = None, queue: Optional[QueueSettings] = None, run_as: Optional[JobRunAs] = None, @@ -8397,9 +8530,10 @@ def create( as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. @@ -8432,6 +8566,9 @@ def create( `email_notifications` and `webhook_notifications` for this job. :param parameters: List[:class:`JobParameterDefinition`] (optional) Job-level parameter definitions + :param parent_path: str (optional) + Path of the job parent folder in workspace file tree. If absent, the job doesn't have a workspace + object. :param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. This field determines the level of compute performance or cost-efficiency for the run. @@ -8507,6 +8644,8 @@ def create( body["notification_settings"] = notification_settings.as_dict() if parameters is not None: body["parameters"] = [v.as_dict() for v in parameters] + if parent_path is not None: + body["parent_path"] = parent_path if performance_target is not None: body["performance_target"] = performance_target.value if queue is not None: diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 94fd823ca..2fe43aa93 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -1385,7 +1385,11 @@ class Feature: """The filter condition applied to the source data before aggregation.""" lineage_context: Optional[LineageContext] = None - """Lineage context information for this feature.""" + """WARNING: This field is primarily intended for internal use by Databricks systems and is + automatically populated when features are created through Databricks notebooks or jobs. Users + should not manually set this field as incorrect values may lead to inaccurate lineage tracking + or unexpected behavior. 
This field will be set by feature-engineering client and should be left + unset by SDK and terraform users.""" def as_dict(self) -> dict: """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" @@ -1964,6 +1968,31 @@ def from_dict(cls, d: Dict[str, Any]) -> GetLoggedModelResponse: return cls(model=_from_dict(d, "model", LoggedModel)) +@dataclass +class GetLoggedModelsRequestResponse: + models: Optional[List[LoggedModel]] = None + """The retrieved logged models.""" + + def as_dict(self) -> dict: + """Serializes the GetLoggedModelsRequestResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.models: + body["models"] = [v.as_dict() for v in self.models] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GetLoggedModelsRequestResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.models: + body["models"] = self.models + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GetLoggedModelsRequestResponse: + """Deserializes the GetLoggedModelsRequestResponse from a dictionary.""" + return cls(models=_repeated_dict(d, "models", LoggedModel)) + + @dataclass class GetMetricHistoryResponse: metrics: Optional[List[Metric]] = None @@ -3154,7 +3183,7 @@ class MaterializedFeature: offline_store_config: Optional[OfflineStoreConfig] = None - online_store_config: Optional[OnlineStore] = None + online_store_config: Optional[OnlineStoreConfig] = None pipeline_schedule_state: Optional[MaterializedFeaturePipelineScheduleState] = None """The schedule state of the materialization pipeline.""" @@ -3209,7 +3238,7 @@ def from_dict(cls, d: Dict[str, Any]) -> MaterializedFeature: last_materialization_time=d.get("last_materialization_time", None), materialized_feature_id=d.get("materialized_feature_id", None), offline_store_config=_from_dict(d, "offline_store_config", OfflineStoreConfig), - online_store_config=_from_dict(d, "online_store_config", OnlineStore), + online_store_config=_from_dict(d, "online_store_config", OnlineStoreConfig), pipeline_schedule_state=_enum(d, "pipeline_schedule_state", MaterializedFeaturePipelineScheduleState), table_name=d.get("table_name", None), ) @@ -4013,6 +4042,60 @@ def from_dict(cls, d: Dict[str, Any]) -> OnlineStore: ) +@dataclass +class OnlineStoreConfig: + """Configuration for online store destination.""" + + catalog_name: str + """The Unity Catalog catalog name. This name is also used as the Lakebase logical database name.""" + + schema_name: str + """The Unity Catalog schema name.""" + + table_name_prefix: str + """Prefix for Unity Catalog table name. 
The materialized feature will be stored in a Lakebase table + with this prefix and a generated postfix.""" + + online_store_name: str + """The name of the target online store.""" + + def as_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + def as_shallow_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> OnlineStoreConfig: + """Deserializes the OnlineStoreConfig from a dictionary.""" + return cls( + catalog_name=d.get("catalog_name", None), + online_store_name=d.get("online_store_name", None), + schema_name=d.get("schema_name", None), + table_name_prefix=d.get("table_name_prefix", None), + ) + + class OnlineStoreState(Enum): AVAILABLE = "AVAILABLE" @@ -6076,6 +6159,25 @@ def get_logged_model(self, model_id: str) -> GetLoggedModelResponse: res = self._api.do("GET", f"/api/2.0/mlflow/logged-models/{model_id}", headers=headers) return GetLoggedModelResponse.from_dict(res) + def get_logged_models(self, *, model_ids: Optional[List[str]] = None) -> GetLoggedModelsRequestResponse: + """Batch endpoint for getting logged models from a list of model IDs + + :param model_ids: List[str] (optional) + The IDs of the logged models to retrieve. Max threshold is 100. + + :returns: :class:`GetLoggedModelsRequestResponse` + """ + + query = {} + if model_ids is not None: + query["model_ids"] = [v for v in model_ids] + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/mlflow/logged-models:batchGet", query=query, headers=headers) + return GetLoggedModelsRequestResponse.from_dict(res) + def get_permission_levels(self, experiment_id: str) -> GetExperimentPermissionLevelsResponse: """Gets the permission levels that a user can have on an object. 
diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9ab410419..55c79fafc 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -22,6 +22,51 @@ # all definitions in this file are in alphabetical order +@dataclass +class ApplyEnvironmentRequestResponse: + def as_dict(self) -> dict: + """Serializes the ApplyEnvironmentRequestResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ApplyEnvironmentRequestResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ApplyEnvironmentRequestResponse: + """Deserializes the ApplyEnvironmentRequestResponse from a dictionary.""" + return cls() + + +@dataclass +class ConnectionParameters: + source_catalog: Optional[str] = None + """Source catalog for initial connection. This is necessary for schema exploration in some database + systems like Oracle, and optional but nice-to-have in some other database systems like Postgres. + For Oracle databases, this maps to a service name.""" + + def as_dict(self) -> dict: + """Serializes the ConnectionParameters into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ConnectionParameters into a shallow dictionary of its immediate attributes.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ConnectionParameters: + """Deserializes the ConnectionParameters from a dictionary.""" + return cls(source_catalog=d.get("source_catalog", None)) + + @dataclass class CreatePipelineResponse: effective_settings: Optional[PipelineSpec] = None @@ -352,6 +397,9 @@ class GetPipelineResponse: effective_budget_policy_id: Optional[str] = None """Serverless budget policy ID of this pipeline.""" + effective_usage_policy_id: Optional[str] = None + """Serverless usage policy ID of the pipeline.""" + health: Optional[GetPipelineResponseHealth] = None """The health of a pipeline.""" @@ -392,6 +440,8 @@ def as_dict(self) -> dict: body["creator_user_name"] = self.creator_user_name if self.effective_budget_policy_id is not None: body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.health is not None: body["health"] = self.health.value if self.last_modified is not None: @@ -423,6 +473,8 @@ def as_shallow_dict(self) -> dict: body["creator_user_name"] = self.creator_user_name if self.effective_budget_policy_id is not None: body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.health is not None: body["health"] = self.health if self.last_modified is not None: @@ -451,6 +503,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetPipelineResponse: cluster_id=d.get("cluster_id", None), creator_user_name=d.get("creator_user_name", None), effective_budget_policy_id=d.get("effective_budget_policy_id", None), + effective_usage_policy_id=d.get("effective_usage_policy_id", None), health=_enum(d, "health", 
GetPipelineResponseHealth), last_modified=d.get("last_modified", None), latest_updates=_repeated_dict(d, "latest_updates", UpdateStateInfo), @@ -554,6 +607,9 @@ class IngestionGatewayPipelineDefinition: """[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.""" + connection_parameters: Optional[ConnectionParameters] = None + """Optional, Internal. Parameters required to establish an initial connection with the source.""" + gateway_storage_name: Optional[str] = None """Optional. The Unity Catalog-compatible name for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Spark Declarative Pipelines @@ -566,6 +622,8 @@ def as_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters.as_dict() if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -581,6 +639,8 @@ def as_shallow_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -595,6 +655,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionGatewayPipelineDefinition: return cls( connection_id=d.get("connection_id", None), connection_name=d.get("connection_name", None), + connection_parameters=_from_dict(d, "connection_parameters", ConnectionParameters), gateway_storage_catalog=d.get("gateway_storage_catalog", None), gateway_storage_name=d.get("gateway_storage_name", None), gateway_storage_schema=d.get("gateway_storage_schema", None), @@ -607,6 +668,11 @@ class IngestionPipelineDefinition: """Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.""" + ingest_from_uc_foreign_catalog: Optional[bool] = None + """Immutable. If set to true, the pipeline will ingest tables from the UC foreign catalogs directly + without the need to specify a UC connection or ingestion gateway. The `source_catalog` fields in + objects of IngestionConfig are interpreted as the UC foreign catalogs to ingest from.""" + ingestion_gateway_id: Optional[str] = None """Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. 
This is used with connectors to databases like SQL Server.""" @@ -634,6 +700,8 @@ def as_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -653,6 +721,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -672,6 +742,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinition: """Deserializes the IngestionPipelineDefinition from a dictionary.""" return cls( connection_name=d.get("connection_name", None), + ingest_from_uc_foreign_catalog=d.get("ingest_from_uc_foreign_catalog", None), ingestion_gateway_id=d.get("ingestion_gateway_id", None), netsuite_jar_path=d.get("netsuite_jar_path", None), objects=_repeated_dict(d, "objects", IngestionConfig), @@ -1084,6 +1155,9 @@ class Origin: flow_name: Optional[str] = None """The name of the flow. Not unique.""" + graph_id: Optional[str] = None + """The UUID of the graph associated with this event, corresponding to a GRAPH_UPDATED event.""" + host: Optional[str] = None """The optional host name where the event was triggered""" @@ -1132,6 +1206,8 @@ def as_dict(self) -> dict: body["flow_id"] = self.flow_id if self.flow_name is not None: body["flow_name"] = self.flow_name + if self.graph_id is not None: + body["graph_id"] = self.graph_id if self.host is not None: body["host"] = self.host if self.maintenance_id is not None: @@ -1171,6 +1247,8 @@ def as_shallow_dict(self) -> dict: body["flow_id"] = self.flow_id if self.flow_name is not None: body["flow_name"] = self.flow_name + if self.graph_id is not None: + body["graph_id"] = self.graph_id if self.host is not None: body["host"] = self.host if self.maintenance_id is not None: @@ -1205,6 +1283,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Origin: dataset_name=d.get("dataset_name", None), flow_id=d.get("flow_id", None), flow_name=d.get("flow_name", None), + graph_id=d.get("graph_id", None), host=d.get("host", None), maintenance_id=d.get("maintenance_id", None), materialization_name=d.get("materialization_name", None), @@ -2526,6 +2605,115 @@ def from_dict(cls, d: Dict[str, Any]) -> RestartWindow: ) +@dataclass +class RestorePipelineRequestResponse: + def as_dict(self) -> dict: + """Serializes the RestorePipelineRequestResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RestorePipelineRequestResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RestorePipelineRequestResponse: + """Deserializes the RestorePipelineRequestResponse from a dictionary.""" + return cls() + + +@dataclass +class RewindDatasetSpec: + """Configuration for rewinding a specific dataset.""" + + cascade: Optional[bool] = None + """Whether to cascade the rewind to dependent datasets. 
Must be specified.""" + + identifier: Optional[str] = None + """The identifier of the dataset (e.g., "main.foo.tbl1").""" + + reset_checkpoints: Optional[bool] = None + """Whether to reset checkpoints for this dataset.""" + + def as_dict(self) -> dict: + """Serializes the RewindDatasetSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.cascade is not None: + body["cascade"] = self.cascade + if self.identifier is not None: + body["identifier"] = self.identifier + if self.reset_checkpoints is not None: + body["reset_checkpoints"] = self.reset_checkpoints + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RewindDatasetSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.cascade is not None: + body["cascade"] = self.cascade + if self.identifier is not None: + body["identifier"] = self.identifier + if self.reset_checkpoints is not None: + body["reset_checkpoints"] = self.reset_checkpoints + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RewindDatasetSpec: + """Deserializes the RewindDatasetSpec from a dictionary.""" + return cls( + cascade=d.get("cascade", None), + identifier=d.get("identifier", None), + reset_checkpoints=d.get("reset_checkpoints", None), + ) + + +@dataclass +class RewindSpec: + """Information about a rewind being requested for this pipeline or some of the datasets in it.""" + + datasets: Optional[List[RewindDatasetSpec]] = None + """List of datasets to rewind with specific configuration for each. When not specified, all + datasets will be rewound with cascade = true and reset_checkpoints = true.""" + + dry_run: Optional[bool] = None + """If true, this is a dry run and we should emit the RewindSummary but not perform the rewind.""" + + rewind_timestamp: Optional[str] = None + """The base timestamp to rewind to. Must be specified.""" + + def as_dict(self) -> dict: + """Serializes the RewindSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.datasets: + body["datasets"] = [v.as_dict() for v in self.datasets] + if self.dry_run is not None: + body["dry_run"] = self.dry_run + if self.rewind_timestamp is not None: + body["rewind_timestamp"] = self.rewind_timestamp + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RewindSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.datasets: + body["datasets"] = self.datasets + if self.dry_run is not None: + body["dry_run"] = self.dry_run + if self.rewind_timestamp is not None: + body["rewind_timestamp"] = self.rewind_timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RewindSpec: + """Deserializes the RewindSpec from a dictionary.""" + return cls( + datasets=_repeated_dict(d, "datasets", RewindDatasetSpec), + dry_run=d.get("dry_run", None), + rewind_timestamp=d.get("rewind_timestamp", None), + ) + + @dataclass class RunAs: """Write-only setting, available only in Create/Update calls. Specifies the user or service @@ -2971,6 +3159,10 @@ class TableSpecificConfig: None ) + row_filter: Optional[str] = None + """(Optional, Immutable) The row filter condition to be applied to the table. It must not contain + the WHERE keyword, only the actual filter condition. It must be in DBSQL format.""" + salesforce_include_formula_fields: Optional[bool] = None """If true, formula fields defined in the table are included in the ingestion. 
This setting is only valid for the Salesforce connector""" @@ -2996,6 +3188,8 @@ def as_dict(self) -> dict: body["primary_keys"] = [v for v in self.primary_keys] if self.query_based_connector_config: body["query_based_connector_config"] = self.query_based_connector_config.as_dict() + if self.row_filter is not None: + body["row_filter"] = self.row_filter if self.salesforce_include_formula_fields is not None: body["salesforce_include_formula_fields"] = self.salesforce_include_formula_fields if self.scd_type is not None: @@ -3017,6 +3211,8 @@ def as_shallow_dict(self) -> dict: body["primary_keys"] = self.primary_keys if self.query_based_connector_config: body["query_based_connector_config"] = self.query_based_connector_config + if self.row_filter is not None: + body["row_filter"] = self.row_filter if self.salesforce_include_formula_fields is not None: body["salesforce_include_formula_fields"] = self.salesforce_include_formula_fields if self.scd_type is not None: @@ -3039,6 +3235,7 @@ def from_dict(cls, d: Dict[str, Any]) -> TableSpecificConfig: "query_based_connector_config", IngestionPipelineDefinitionTableSpecificConfigQueryBasedConnectorConfig, ), + row_filter=d.get("row_filter", None), salesforce_include_formula_fields=d.get("salesforce_include_formula_fields", None), scd_type=_enum(d, "scd_type", TableSpecificConfigScdType), sequence_by=d.get("sequence_by", None), @@ -3079,6 +3276,10 @@ class UpdateInfo: full_refresh_selection are empty, this is a full graph update. Full Refresh on a table means that the states of the table will be reset before the refresh.""" + mode: Optional[UpdateMode] = None + """Indicates whether the update is either part of a continuous job run, or running in legacy + continuous pipeline mode.""" + pipeline_id: Optional[str] = None """The ID of the pipeline.""" @@ -3112,6 +3313,8 @@ def as_dict(self) -> dict: body["full_refresh"] = self.full_refresh if self.full_refresh_selection: body["full_refresh_selection"] = [v for v in self.full_refresh_selection] + if self.mode is not None: + body["mode"] = self.mode.value if self.pipeline_id is not None: body["pipeline_id"] = self.pipeline_id if self.refresh_selection: @@ -3139,6 +3342,8 @@ def as_shallow_dict(self) -> dict: body["full_refresh"] = self.full_refresh if self.full_refresh_selection: body["full_refresh_selection"] = self.full_refresh_selection + if self.mode is not None: + body["mode"] = self.mode if self.pipeline_id is not None: body["pipeline_id"] = self.pipeline_id if self.refresh_selection: @@ -3161,6 +3366,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateInfo: creation_time=d.get("creation_time", None), full_refresh=d.get("full_refresh", None), full_refresh_selection=d.get("full_refresh_selection", None), + mode=_enum(d, "mode", UpdateMode), pipeline_id=d.get("pipeline_id", None), refresh_selection=d.get("refresh_selection", None), state=_enum(d, "state", UpdateInfoState), @@ -3197,6 +3403,12 @@ class UpdateInfoState(Enum): WAITING_FOR_RESOURCES = "WAITING_FOR_RESOURCES" +class UpdateMode(Enum): + + CONTINUOUS = "CONTINUOUS" + DEFAULT = "DEFAULT" + + @dataclass class UpdateStateInfo: creation_time: Optional[str] = None @@ -3301,6 +3513,22 @@ def wait_get_pipeline_idle( attempt += 1 raise TimeoutError(f"timed out after {timeout}: {status_message}") + def apply_environment(self, pipeline_id: str) -> ApplyEnvironmentRequestResponse: + """* Applies the current pipeline environment onto the pipeline compute. The environment applied can be + used by subsequent dev-mode updates. 
+ + :param pipeline_id: str + + :returns: :class:`ApplyEnvironmentRequestResponse` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/pipelines/{pipeline_id}/environment/apply", headers=headers) + return ApplyEnvironmentRequestResponse.from_dict(res) + def create( self, *, @@ -3483,8 +3711,8 @@ def create( return CreatePipelineResponse.from_dict(res) def delete(self, pipeline_id: str): - """Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and - its tables. You cannot undo this action. + """Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline deletion will cascade to all + pipeline tables. Please reach out to Databricks support for assistance to undo this action. :param pipeline_id: str @@ -3709,6 +3937,23 @@ def list_updates( res = self._api.do("GET", f"/api/2.0/pipelines/{pipeline_id}/updates", query=query, headers=headers) return ListUpdatesResponse.from_dict(res) + def restore_pipeline(self, pipeline_id: str) -> RestorePipelineRequestResponse: + """* Restores a pipeline that was previously deleted, if within the restoration window. All tables + deleted at pipeline deletion will be undropped as well. + + :param pipeline_id: str + The ID of the pipeline to restore + + :returns: :class:`RestorePipelineRequestResponse` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/pipelines/{pipeline_id}/restore", headers=headers) + return RestorePipelineRequestResponse.from_dict(res) + def set_permissions( self, pipeline_id: str, *, access_control_list: Optional[List[PipelineAccessControlRequest]] = None ) -> PipelinePermissions: @@ -3741,6 +3986,7 @@ def start_update( full_refresh: Optional[bool] = None, full_refresh_selection: Optional[List[str]] = None, refresh_selection: Optional[List[str]] = None, + rewind_spec: Optional[RewindSpec] = None, validate_only: Optional[bool] = None, ) -> StartUpdateResponse: """Starts a new update for the pipeline. If there is already an active update for the pipeline, the @@ -3758,6 +4004,8 @@ def start_update( A list of tables to update without fullRefresh. If both refresh_selection and full_refresh_selection are empty, this is a full graph update. Full Refresh on a table means that the states of the table will be reset before the refresh. + :param rewind_spec: :class:`RewindSpec` (optional) + The information about the requested rewind operation. If specified this is a rewind mode update. :param validate_only: bool (optional) If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. @@ -3774,6 +4022,8 @@ def start_update( body["full_refresh_selection"] = [v for v in full_refresh_selection] if refresh_selection is not None: body["refresh_selection"] = [v for v in refresh_selection] + if rewind_spec is not None: + body["rewind_spec"] = rewind_spec.as_dict() if validate_only is not None: body["validate_only"] = validate_only headers = { diff --git a/databricks/sdk/service/postgres.py b/databricks/sdk/service/postgres.py new file mode 100755 index 000000000..120f01005 --- /dev/null +++ b/databricks/sdk/service/postgres.py @@ -0,0 +1,1974 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional + +from databricks.sdk.common import lro +from databricks.sdk.retries import RetryError, poll +from databricks.sdk.service._internal import _enum, _from_dict, _repeated_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class DatabaseBranch: + create_time: Optional[str] = None + """A timestamp indicating when the branch was created.""" + + current_state: Optional[str] = None + """The branch’s state, indicating if it is initializing, ready for use, or archived.""" + + default: Optional[bool] = None + """Whether the branch is the project's default branch. This field is only returned on create/update + responses. See effective_default for the value that is actually applied to the database branch.""" + + effective_default: Optional[bool] = None + """Whether the branch is the project's default branch.""" + + is_protected: Optional[bool] = None + """Whether the branch is protected.""" + + logical_size_bytes: Optional[int] = None + """The logical size of the branch.""" + + name: Optional[str] = None + """The resource name of the branch. Format: projects/{project_id}/branches/{branch_id}""" + + parent: Optional[str] = None + """The parent to list branches from. Format: projects/{project_id}""" + + parent_branch: Optional[str] = None + """The parent branch Format: projects/{project_id}/branches/{branch_id}""" + + parent_branch_lsn: Optional[str] = None + """The Log Sequence Number (LSN) on the parent branch from which this branch was created. When + restoring a branch using the Restore Database Branch endpoint, this value isn’t finalized + until all operations related to the restore have completed successfully.""" + + parent_branch_time: Optional[str] = None + """The point in time on the parent branch from which this branch was created.""" + + pending_state: Optional[str] = None + + state_change_time: Optional[str] = None + """A timestamp indicating when the `current_state` began.""" + + update_time: Optional[str] = None + """A timestamp indicating when the branch was last updated.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseBranch into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.current_state is not None: + body["current_state"] = self.current_state + if self.default is not None: + body["default"] = self.default + if self.effective_default is not None: + body["effective_default"] = self.effective_default + if self.is_protected is not None: + body["is_protected"] = self.is_protected + if self.logical_size_bytes is not None: + body["logical_size_bytes"] = self.logical_size_bytes + if self.name is not None: + body["name"] = self.name + if self.parent is not None: + body["parent"] = self.parent + if self.parent_branch is not None: + body["parent_branch"] = self.parent_branch + if self.parent_branch_lsn is not None: + body["parent_branch_lsn"] = self.parent_branch_lsn + if self.parent_branch_time is not None: + body["parent_branch_time"] = self.parent_branch_time + if self.pending_state is not None: + body["pending_state"] = self.pending_state + if self.state_change_time is not None: + body["state_change_time"] = self.state_change_time + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + def 
as_shallow_dict(self) -> dict: + """Serializes the DatabaseBranch into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_time is not None: + body["create_time"] = self.create_time + if self.current_state is not None: + body["current_state"] = self.current_state + if self.default is not None: + body["default"] = self.default + if self.effective_default is not None: + body["effective_default"] = self.effective_default + if self.is_protected is not None: + body["is_protected"] = self.is_protected + if self.logical_size_bytes is not None: + body["logical_size_bytes"] = self.logical_size_bytes + if self.name is not None: + body["name"] = self.name + if self.parent is not None: + body["parent"] = self.parent + if self.parent_branch is not None: + body["parent_branch"] = self.parent_branch + if self.parent_branch_lsn is not None: + body["parent_branch_lsn"] = self.parent_branch_lsn + if self.parent_branch_time is not None: + body["parent_branch_time"] = self.parent_branch_time + if self.pending_state is not None: + body["pending_state"] = self.pending_state + if self.state_change_time is not None: + body["state_change_time"] = self.state_change_time + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseBranch: + """Deserializes the DatabaseBranch from a dictionary.""" + return cls( + create_time=d.get("create_time", None), + current_state=d.get("current_state", None), + default=d.get("default", None), + effective_default=d.get("effective_default", None), + is_protected=d.get("is_protected", None), + logical_size_bytes=d.get("logical_size_bytes", None), + name=d.get("name", None), + parent=d.get("parent", None), + parent_branch=d.get("parent_branch", None), + parent_branch_lsn=d.get("parent_branch_lsn", None), + parent_branch_time=d.get("parent_branch_time", None), + pending_state=d.get("pending_state", None), + state_change_time=d.get("state_change_time", None), + update_time=d.get("update_time", None), + ) + + +@dataclass +class DatabaseBranchOperationMetadata: + def as_dict(self) -> dict: + """Serializes the DatabaseBranchOperationMetadata into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseBranchOperationMetadata into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseBranchOperationMetadata: + """Deserializes the DatabaseBranchOperationMetadata from a dictionary.""" + return cls() + + +@dataclass +class DatabaseEndpoint: + autoscaling_limit_max_cu: Optional[float] = None + """The maximum number of Compute Units.""" + + autoscaling_limit_min_cu: Optional[float] = None + """The minimum number of Compute Units.""" + + create_time: Optional[str] = None + """A timestamp indicating when the compute endpoint was created.""" + + current_state: Optional[DatabaseEndpointState] = None + + disabled: Optional[bool] = None + """Whether to restrict connections to the compute endpoint. Enabling this option schedules a + suspend compute operation. A disabled compute endpoint cannot be enabled by a connection or + console action.""" + + host: Optional[str] = None + """The hostname of the compute endpoint. 
This is the hostname specified when connecting to a + database.""" + + last_active_time: Optional[str] = None + """A timestamp indicating when the compute endpoint was last active.""" + + name: Optional[str] = None + """The resource name of the endpoint. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}""" + + parent: Optional[str] = None + """The parent to list endpoints from. Format: projects/{project_id}/branches/{branch_id}""" + + pending_state: Optional[DatabaseEndpointState] = None + + pooler_mode: Optional[DatabaseEndpointPoolerMode] = None + + settings: Optional[DatabaseEndpointSettings] = None + + start_time: Optional[str] = None + """A timestamp indicating when the compute endpoint was last started.""" + + suspend_time: Optional[str] = None + """A timestamp indicating when the compute endpoint was last suspended.""" + + suspend_timeout_duration: Optional[str] = None + """Duration of inactivity after which the compute endpoint is automatically suspended.""" + + type: Optional[DatabaseEndpointType] = None + """NOTE: if want type to default to some value set the server then an effective_type field OR make + this field REQUIRED""" + + update_time: Optional[str] = None + """A timestamp indicating when the compute endpoint was last updated.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseEndpoint into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.autoscaling_limit_max_cu is not None: + body["autoscaling_limit_max_cu"] = self.autoscaling_limit_max_cu + if self.autoscaling_limit_min_cu is not None: + body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu + if self.create_time is not None: + body["create_time"] = self.create_time + if self.current_state is not None: + body["current_state"] = self.current_state.value + if self.disabled is not None: + body["disabled"] = self.disabled + if self.host is not None: + body["host"] = self.host + if self.last_active_time is not None: + body["last_active_time"] = self.last_active_time + if self.name is not None: + body["name"] = self.name + if self.parent is not None: + body["parent"] = self.parent + if self.pending_state is not None: + body["pending_state"] = self.pending_state.value + if self.pooler_mode is not None: + body["pooler_mode"] = self.pooler_mode.value + if self.settings: + body["settings"] = self.settings.as_dict() + if self.start_time is not None: + body["start_time"] = self.start_time + if self.suspend_time is not None: + body["suspend_time"] = self.suspend_time + if self.suspend_timeout_duration is not None: + body["suspend_timeout_duration"] = self.suspend_timeout_duration + if self.type is not None: + body["type"] = self.type.value + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseEndpoint into a shallow dictionary of its immediate attributes.""" + body = {} + if self.autoscaling_limit_max_cu is not None: + body["autoscaling_limit_max_cu"] = self.autoscaling_limit_max_cu + if self.autoscaling_limit_min_cu is not None: + body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu + if self.create_time is not None: + body["create_time"] = self.create_time + if self.current_state is not None: + body["current_state"] = self.current_state + if self.disabled is not None: + body["disabled"] = self.disabled + if self.host is not None: + body["host"] = self.host + if self.last_active_time is not None: + body["last_active_time"] = 
self.last_active_time + if self.name is not None: + body["name"] = self.name + if self.parent is not None: + body["parent"] = self.parent + if self.pending_state is not None: + body["pending_state"] = self.pending_state + if self.pooler_mode is not None: + body["pooler_mode"] = self.pooler_mode + if self.settings: + body["settings"] = self.settings + if self.start_time is not None: + body["start_time"] = self.start_time + if self.suspend_time is not None: + body["suspend_time"] = self.suspend_time + if self.suspend_timeout_duration is not None: + body["suspend_timeout_duration"] = self.suspend_timeout_duration + if self.type is not None: + body["type"] = self.type + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseEndpoint: + """Deserializes the DatabaseEndpoint from a dictionary.""" + return cls( + autoscaling_limit_max_cu=d.get("autoscaling_limit_max_cu", None), + autoscaling_limit_min_cu=d.get("autoscaling_limit_min_cu", None), + create_time=d.get("create_time", None), + current_state=_enum(d, "current_state", DatabaseEndpointState), + disabled=d.get("disabled", None), + host=d.get("host", None), + last_active_time=d.get("last_active_time", None), + name=d.get("name", None), + parent=d.get("parent", None), + pending_state=_enum(d, "pending_state", DatabaseEndpointState), + pooler_mode=_enum(d, "pooler_mode", DatabaseEndpointPoolerMode), + settings=_from_dict(d, "settings", DatabaseEndpointSettings), + start_time=d.get("start_time", None), + suspend_time=d.get("suspend_time", None), + suspend_timeout_duration=d.get("suspend_timeout_duration", None), + type=_enum(d, "type", DatabaseEndpointType), + update_time=d.get("update_time", None), + ) + + +@dataclass +class DatabaseEndpointOperationMetadata: + def as_dict(self) -> dict: + """Serializes the DatabaseEndpointOperationMetadata into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseEndpointOperationMetadata into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseEndpointOperationMetadata: + """Deserializes the DatabaseEndpointOperationMetadata from a dictionary.""" + return cls() + + +class DatabaseEndpointPoolerMode(Enum): + """The connection pooler mode. 
Lakebase supports PgBouncer in `transaction` mode only.""" + + TRANSACTION = "TRANSACTION" + + +@dataclass +class DatabaseEndpointSettings: + """A collection of settings for a compute endpoint""" + + pg_settings: Optional[Dict[str, str]] = None + """A raw representation of Postgres settings.""" + + pgbouncer_settings: Optional[Dict[str, str]] = None + """A raw representation of PgBouncer settings.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseEndpointSettings into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.pg_settings: + body["pg_settings"] = self.pg_settings + if self.pgbouncer_settings: + body["pgbouncer_settings"] = self.pgbouncer_settings + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseEndpointSettings into a shallow dictionary of its immediate attributes.""" + body = {} + if self.pg_settings: + body["pg_settings"] = self.pg_settings + if self.pgbouncer_settings: + body["pgbouncer_settings"] = self.pgbouncer_settings + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseEndpointSettings: + """Deserializes the DatabaseEndpointSettings from a dictionary.""" + return cls(pg_settings=d.get("pg_settings", None), pgbouncer_settings=d.get("pgbouncer_settings", None)) + + +class DatabaseEndpointState(Enum): + """The state of the compute endpoint""" + + ACTIVE = "ACTIVE" + IDLE = "IDLE" + INIT = "INIT" + + +class DatabaseEndpointType(Enum): + """The compute endpoint type. Either `read_write` or `read_only`.""" + + READ_ONLY = "READ_ONLY" + READ_WRITE = "READ_WRITE" + + +@dataclass +class DatabaseProject: + branch_logical_size_limit_bytes: Optional[int] = None + """The logical size limit for a branch.""" + + budget_policy_id: Optional[str] = None + """The desired budget policy to associate with the instance. This field is only returned on + create/update responses, and represents the customer provided budget policy. See + effective_budget_policy_id for the policy that is actually applied to the instance.""" + + compute_last_active_time: Optional[str] = None + """The most recent time when any endpoint of this project was active.""" + + create_time: Optional[str] = None + """A timestamp indicating when the project was created.""" + + custom_tags: Optional[List[DatabaseProjectCustomTag]] = None + """Custom tags associated with the instance.""" + + default_endpoint_settings: Optional[DatabaseProjectDefaultEndpointSettings] = None + + display_name: Optional[str] = None + """Human-readable project name.""" + + effective_budget_policy_id: Optional[str] = None + """The policy that is applied to the instance.""" + + effective_default_endpoint_settings: Optional[DatabaseProjectDefaultEndpointSettings] = None + + effective_display_name: Optional[str] = None + + effective_history_retention_duration: Optional[str] = None + + effective_pg_version: Optional[int] = None + + effective_settings: Optional[DatabaseProjectSettings] = None + + history_retention_duration: Optional[str] = None + """The number of seconds to retain the shared history for point in time recovery for all branches + in this project.""" + + name: Optional[str] = None + """The resource name of the project. Format: projects/{project_id}""" + + pg_version: Optional[int] = None + """The major Postgres version number.""" + + settings: Optional[DatabaseProjectSettings] = None + + synthetic_storage_size_bytes: Optional[int] = None + """The current space occupied by the project in storage. 
Synthetic storage size combines the + logical data size and Write-Ahead Log (WAL) size for all branches in a project.""" + + update_time: Optional[str] = None + """A timestamp indicating when the project was last updated.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseProject into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.branch_logical_size_limit_bytes is not None: + body["branch_logical_size_limit_bytes"] = self.branch_logical_size_limit_bytes + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.compute_last_active_time is not None: + body["compute_last_active_time"] = self.compute_last_active_time + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_tags: + body["custom_tags"] = [v.as_dict() for v in self.custom_tags] + if self.default_endpoint_settings: + body["default_endpoint_settings"] = self.default_endpoint_settings.as_dict() + if self.display_name is not None: + body["display_name"] = self.display_name + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_default_endpoint_settings: + body["effective_default_endpoint_settings"] = self.effective_default_endpoint_settings.as_dict() + if self.effective_display_name is not None: + body["effective_display_name"] = self.effective_display_name + if self.effective_history_retention_duration is not None: + body["effective_history_retention_duration"] = self.effective_history_retention_duration + if self.effective_pg_version is not None: + body["effective_pg_version"] = self.effective_pg_version + if self.effective_settings: + body["effective_settings"] = self.effective_settings.as_dict() + if self.history_retention_duration is not None: + body["history_retention_duration"] = self.history_retention_duration + if self.name is not None: + body["name"] = self.name + if self.pg_version is not None: + body["pg_version"] = self.pg_version + if self.settings: + body["settings"] = self.settings.as_dict() + if self.synthetic_storage_size_bytes is not None: + body["synthetic_storage_size_bytes"] = self.synthetic_storage_size_bytes + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseProject into a shallow dictionary of its immediate attributes.""" + body = {} + if self.branch_logical_size_limit_bytes is not None: + body["branch_logical_size_limit_bytes"] = self.branch_logical_size_limit_bytes + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id + if self.compute_last_active_time is not None: + body["compute_last_active_time"] = self.compute_last_active_time + if self.create_time is not None: + body["create_time"] = self.create_time + if self.custom_tags: + body["custom_tags"] = self.custom_tags + if self.default_endpoint_settings: + body["default_endpoint_settings"] = self.default_endpoint_settings + if self.display_name is not None: + body["display_name"] = self.display_name + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_default_endpoint_settings: + body["effective_default_endpoint_settings"] = self.effective_default_endpoint_settings + if self.effective_display_name is not None: + body["effective_display_name"] = self.effective_display_name + if self.effective_history_retention_duration is not 
None: + body["effective_history_retention_duration"] = self.effective_history_retention_duration + if self.effective_pg_version is not None: + body["effective_pg_version"] = self.effective_pg_version + if self.effective_settings: + body["effective_settings"] = self.effective_settings + if self.history_retention_duration is not None: + body["history_retention_duration"] = self.history_retention_duration + if self.name is not None: + body["name"] = self.name + if self.pg_version is not None: + body["pg_version"] = self.pg_version + if self.settings: + body["settings"] = self.settings + if self.synthetic_storage_size_bytes is not None: + body["synthetic_storage_size_bytes"] = self.synthetic_storage_size_bytes + if self.update_time is not None: + body["update_time"] = self.update_time + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseProject: + """Deserializes the DatabaseProject from a dictionary.""" + return cls( + branch_logical_size_limit_bytes=d.get("branch_logical_size_limit_bytes", None), + budget_policy_id=d.get("budget_policy_id", None), + compute_last_active_time=d.get("compute_last_active_time", None), + create_time=d.get("create_time", None), + custom_tags=_repeated_dict(d, "custom_tags", DatabaseProjectCustomTag), + default_endpoint_settings=_from_dict( + d, "default_endpoint_settings", DatabaseProjectDefaultEndpointSettings + ), + display_name=d.get("display_name", None), + effective_budget_policy_id=d.get("effective_budget_policy_id", None), + effective_default_endpoint_settings=_from_dict( + d, "effective_default_endpoint_settings", DatabaseProjectDefaultEndpointSettings + ), + effective_display_name=d.get("effective_display_name", None), + effective_history_retention_duration=d.get("effective_history_retention_duration", None), + effective_pg_version=d.get("effective_pg_version", None), + effective_settings=_from_dict(d, "effective_settings", DatabaseProjectSettings), + history_retention_duration=d.get("history_retention_duration", None), + name=d.get("name", None), + pg_version=d.get("pg_version", None), + settings=_from_dict(d, "settings", DatabaseProjectSettings), + synthetic_storage_size_bytes=d.get("synthetic_storage_size_bytes", None), + update_time=d.get("update_time", None), + ) + + +@dataclass +class DatabaseProjectCustomTag: + key: Optional[str] = None + """The key of the custom tag.""" + + value: Optional[str] = None + """The value of the custom tag.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseProjectCustomTag into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.key is not None: + body["key"] = self.key + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseProjectCustomTag into a shallow dictionary of its immediate attributes.""" + body = {} + if self.key is not None: + body["key"] = self.key + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseProjectCustomTag: + """Deserializes the DatabaseProjectCustomTag from a dictionary.""" + return cls(key=d.get("key", None), value=d.get("value", None)) + + +@dataclass +class DatabaseProjectDefaultEndpointSettings: + """A collection of settings for a database endpoint.""" + + autoscaling_limit_max_cu: Optional[float] = None + """The maximum number of Compute Units.""" + + autoscaling_limit_min_cu: Optional[float] = None + """The minimum number of Compute Units.""" + + 
pg_settings: Optional[Dict[str, str]] = None + """A raw representation of Postgres settings.""" + + pgbouncer_settings: Optional[Dict[str, str]] = None + """A raw representation of PgBouncer settings.""" + + suspend_timeout_duration: Optional[str] = None + """Duration of inactivity after which the compute endpoint is automatically suspended.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseProjectDefaultEndpointSettings into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.autoscaling_limit_max_cu is not None: + body["autoscaling_limit_max_cu"] = self.autoscaling_limit_max_cu + if self.autoscaling_limit_min_cu is not None: + body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu + if self.pg_settings: + body["pg_settings"] = self.pg_settings + if self.pgbouncer_settings: + body["pgbouncer_settings"] = self.pgbouncer_settings + if self.suspend_timeout_duration is not None: + body["suspend_timeout_duration"] = self.suspend_timeout_duration + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseProjectDefaultEndpointSettings into a shallow dictionary of its immediate attributes.""" + body = {} + if self.autoscaling_limit_max_cu is not None: + body["autoscaling_limit_max_cu"] = self.autoscaling_limit_max_cu + if self.autoscaling_limit_min_cu is not None: + body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu + if self.pg_settings: + body["pg_settings"] = self.pg_settings + if self.pgbouncer_settings: + body["pgbouncer_settings"] = self.pgbouncer_settings + if self.suspend_timeout_duration is not None: + body["suspend_timeout_duration"] = self.suspend_timeout_duration + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseProjectDefaultEndpointSettings: + """Deserializes the DatabaseProjectDefaultEndpointSettings from a dictionary.""" + return cls( + autoscaling_limit_max_cu=d.get("autoscaling_limit_max_cu", None), + autoscaling_limit_min_cu=d.get("autoscaling_limit_min_cu", None), + pg_settings=d.get("pg_settings", None), + pgbouncer_settings=d.get("pgbouncer_settings", None), + suspend_timeout_duration=d.get("suspend_timeout_duration", None), + ) + + +@dataclass +class DatabaseProjectOperationMetadata: + def as_dict(self) -> dict: + """Serializes the DatabaseProjectOperationMetadata into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseProjectOperationMetadata into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseProjectOperationMetadata: + """Deserializes the DatabaseProjectOperationMetadata from a dictionary.""" + return cls() + + +@dataclass +class DatabaseProjectSettings: + enable_logical_replication: Optional[bool] = None + """Sets wal_level=logical for all compute endpoints in this project. All active endpoints will be + suspended. 
Once enabled, logical replication cannot be disabled.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseProjectSettings into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.enable_logical_replication is not None: + body["enable_logical_replication"] = self.enable_logical_replication + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseProjectSettings into a shallow dictionary of its immediate attributes.""" + body = {} + if self.enable_logical_replication is not None: + body["enable_logical_replication"] = self.enable_logical_replication + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseProjectSettings: + """Deserializes the DatabaseProjectSettings from a dictionary.""" + return cls(enable_logical_replication=d.get("enable_logical_replication", None)) + + +@dataclass +class DatabricksServiceExceptionWithDetailsProto: + """Databricks Error that is returned by all Databricks APIs.""" + + details: Optional[List[dict]] = None + """@pbjson-skip""" + + error_code: Optional[ErrorCode] = None + + message: Optional[str] = None + + stack_trace: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.details: + body["details"] = [v for v in self.details] + if self.error_code is not None: + body["error_code"] = self.error_code.value + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabricksServiceExceptionWithDetailsProto into a shallow dictionary of its immediate attributes.""" + body = {} + if self.details: + body["details"] = self.details + if self.error_code is not None: + body["error_code"] = self.error_code + if self.message is not None: + body["message"] = self.message + if self.stack_trace is not None: + body["stack_trace"] = self.stack_trace + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabricksServiceExceptionWithDetailsProto: + """Deserializes the DatabricksServiceExceptionWithDetailsProto from a dictionary.""" + return cls( + details=d.get("details", None), + error_code=_enum(d, "error_code", ErrorCode), + message=d.get("message", None), + stack_trace=d.get("stack_trace", None), + ) + + +class ErrorCode(Enum): + """Legacy definition of the ErrorCode enum. Please keep in sync with + api-base/proto/error_code.proto (except status code mapping annotations as this file doesn't + have them). 
Will be removed eventually, pending the ScalaPB 0.4 cleanup.""" + + ABORTED = "ABORTED" + ALREADY_EXISTS = "ALREADY_EXISTS" + BAD_REQUEST = "BAD_REQUEST" + CANCELLED = "CANCELLED" + CATALOG_ALREADY_EXISTS = "CATALOG_ALREADY_EXISTS" + CATALOG_DOES_NOT_EXIST = "CATALOG_DOES_NOT_EXIST" + CATALOG_NOT_EMPTY = "CATALOG_NOT_EMPTY" + COULD_NOT_ACQUIRE_LOCK = "COULD_NOT_ACQUIRE_LOCK" + CUSTOMER_UNAUTHORIZED = "CUSTOMER_UNAUTHORIZED" + DAC_ALREADY_EXISTS = "DAC_ALREADY_EXISTS" + DAC_DOES_NOT_EXIST = "DAC_DOES_NOT_EXIST" + DATA_LOSS = "DATA_LOSS" + DEADLINE_EXCEEDED = "DEADLINE_EXCEEDED" + DEPLOYMENT_TIMEOUT = "DEPLOYMENT_TIMEOUT" + DIRECTORY_NOT_EMPTY = "DIRECTORY_NOT_EMPTY" + DIRECTORY_PROTECTED = "DIRECTORY_PROTECTED" + DRY_RUN_FAILED = "DRY_RUN_FAILED" + ENDPOINT_NOT_FOUND = "ENDPOINT_NOT_FOUND" + EXTERNAL_LOCATION_ALREADY_EXISTS = "EXTERNAL_LOCATION_ALREADY_EXISTS" + EXTERNAL_LOCATION_DOES_NOT_EXIST = "EXTERNAL_LOCATION_DOES_NOT_EXIST" + FEATURE_DISABLED = "FEATURE_DISABLED" + GIT_CONFLICT = "GIT_CONFLICT" + GIT_REMOTE_ERROR = "GIT_REMOTE_ERROR" + GIT_SENSITIVE_TOKEN_DETECTED = "GIT_SENSITIVE_TOKEN_DETECTED" + GIT_UNKNOWN_REF = "GIT_UNKNOWN_REF" + GIT_URL_NOT_ON_ALLOW_LIST = "GIT_URL_NOT_ON_ALLOW_LIST" + INSECURE_PARTNER_RESPONSE = "INSECURE_PARTNER_RESPONSE" + INTERNAL_ERROR = "INTERNAL_ERROR" + INVALID_PARAMETER_VALUE = "INVALID_PARAMETER_VALUE" + INVALID_STATE = "INVALID_STATE" + INVALID_STATE_TRANSITION = "INVALID_STATE_TRANSITION" + IO_ERROR = "IO_ERROR" + IPYNB_FILE_IN_REPO = "IPYNB_FILE_IN_REPO" + MALFORMED_PARTNER_RESPONSE = "MALFORMED_PARTNER_RESPONSE" + MALFORMED_REQUEST = "MALFORMED_REQUEST" + MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST = "MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST" + MAX_BLOCK_SIZE_EXCEEDED = "MAX_BLOCK_SIZE_EXCEEDED" + MAX_CHILD_NODE_SIZE_EXCEEDED = "MAX_CHILD_NODE_SIZE_EXCEEDED" + MAX_LIST_SIZE_EXCEEDED = "MAX_LIST_SIZE_EXCEEDED" + MAX_NOTEBOOK_SIZE_EXCEEDED = "MAX_NOTEBOOK_SIZE_EXCEEDED" + MAX_READ_SIZE_EXCEEDED = "MAX_READ_SIZE_EXCEEDED" + METASTORE_ALREADY_EXISTS = "METASTORE_ALREADY_EXISTS" + METASTORE_DOES_NOT_EXIST = "METASTORE_DOES_NOT_EXIST" + METASTORE_NOT_EMPTY = "METASTORE_NOT_EMPTY" + NOT_FOUND = "NOT_FOUND" + NOT_IMPLEMENTED = "NOT_IMPLEMENTED" + PARTIAL_DELETE = "PARTIAL_DELETE" + PERMISSION_DENIED = "PERMISSION_DENIED" + PERMISSION_NOT_PROPAGATED = "PERMISSION_NOT_PROPAGATED" + PRINCIPAL_DOES_NOT_EXIST = "PRINCIPAL_DOES_NOT_EXIST" + PROJECTS_OPERATION_TIMEOUT = "PROJECTS_OPERATION_TIMEOUT" + PROVIDER_ALREADY_EXISTS = "PROVIDER_ALREADY_EXISTS" + PROVIDER_DOES_NOT_EXIST = "PROVIDER_DOES_NOT_EXIST" + PROVIDER_SHARE_NOT_ACCESSIBLE = "PROVIDER_SHARE_NOT_ACCESSIBLE" + QUOTA_EXCEEDED = "QUOTA_EXCEEDED" + RECIPIENT_ALREADY_EXISTS = "RECIPIENT_ALREADY_EXISTS" + RECIPIENT_DOES_NOT_EXIST = "RECIPIENT_DOES_NOT_EXIST" + REQUEST_LIMIT_EXCEEDED = "REQUEST_LIMIT_EXCEEDED" + RESOURCE_ALREADY_EXISTS = "RESOURCE_ALREADY_EXISTS" + RESOURCE_CONFLICT = "RESOURCE_CONFLICT" + RESOURCE_DOES_NOT_EXIST = "RESOURCE_DOES_NOT_EXIST" + RESOURCE_EXHAUSTED = "RESOURCE_EXHAUSTED" + RESOURCE_LIMIT_EXCEEDED = "RESOURCE_LIMIT_EXCEEDED" + SCHEMA_ALREADY_EXISTS = "SCHEMA_ALREADY_EXISTS" + SCHEMA_DOES_NOT_EXIST = "SCHEMA_DOES_NOT_EXIST" + SCHEMA_NOT_EMPTY = "SCHEMA_NOT_EMPTY" + SEARCH_QUERY_TOO_LONG = "SEARCH_QUERY_TOO_LONG" + SEARCH_QUERY_TOO_SHORT = "SEARCH_QUERY_TOO_SHORT" + SERVICE_UNDER_MAINTENANCE = "SERVICE_UNDER_MAINTENANCE" + SHARE_ALREADY_EXISTS = "SHARE_ALREADY_EXISTS" + SHARE_DOES_NOT_EXIST = "SHARE_DOES_NOT_EXIST" + STORAGE_CREDENTIAL_ALREADY_EXISTS = "STORAGE_CREDENTIAL_ALREADY_EXISTS" 
+ STORAGE_CREDENTIAL_DOES_NOT_EXIST = "STORAGE_CREDENTIAL_DOES_NOT_EXIST" + TABLE_ALREADY_EXISTS = "TABLE_ALREADY_EXISTS" + TABLE_DOES_NOT_EXIST = "TABLE_DOES_NOT_EXIST" + TEMPORARILY_UNAVAILABLE = "TEMPORARILY_UNAVAILABLE" + UNAUTHENTICATED = "UNAUTHENTICATED" + UNAVAILABLE = "UNAVAILABLE" + UNKNOWN = "UNKNOWN" + UNPARSEABLE_HTTP_ERROR = "UNPARSEABLE_HTTP_ERROR" + WORKSPACE_TEMPORARILY_UNAVAILABLE = "WORKSPACE_TEMPORARILY_UNAVAILABLE" + + +@dataclass +class ListDatabaseBranchesResponse: + database_branches: Optional[List[DatabaseBranch]] = None + """List of branches.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of instances.""" + + def as_dict(self) -> dict: + """Serializes the ListDatabaseBranchesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.database_branches: + body["database_branches"] = [v.as_dict() for v in self.database_branches] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListDatabaseBranchesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.database_branches: + body["database_branches"] = self.database_branches + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListDatabaseBranchesResponse: + """Deserializes the ListDatabaseBranchesResponse from a dictionary.""" + return cls( + database_branches=_repeated_dict(d, "database_branches", DatabaseBranch), + next_page_token=d.get("next_page_token", None), + ) + + +@dataclass +class ListDatabaseEndpointsResponse: + database_endpoints: Optional[List[DatabaseEndpoint]] = None + """List of endpoints.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of instances.""" + + def as_dict(self) -> dict: + """Serializes the ListDatabaseEndpointsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.database_endpoints: + body["database_endpoints"] = [v.as_dict() for v in self.database_endpoints] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListDatabaseEndpointsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.database_endpoints: + body["database_endpoints"] = self.database_endpoints + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListDatabaseEndpointsResponse: + """Deserializes the ListDatabaseEndpointsResponse from a dictionary.""" + return cls( + database_endpoints=_repeated_dict(d, "database_endpoints", DatabaseEndpoint), + next_page_token=d.get("next_page_token", None), + ) + + +@dataclass +class ListDatabaseProjectsResponse: + database_projects: Optional[List[DatabaseProject]] = None + """List of projects.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of instances.""" + + def as_dict(self) -> dict: + """Serializes the ListDatabaseProjectsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.database_projects: + body["database_projects"] = [v.as_dict() for v in self.database_projects] + if self.next_page_token is not None: + body["next_page_token"] = 
self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListDatabaseProjectsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.database_projects: + body["database_projects"] = self.database_projects + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListDatabaseProjectsResponse: + """Deserializes the ListDatabaseProjectsResponse from a dictionary.""" + return cls( + database_projects=_repeated_dict(d, "database_projects", DatabaseProject), + next_page_token=d.get("next_page_token", None), + ) + + +@dataclass +class Operation: + """This resource represents a long-running operation that is the result of a network API call.""" + + done: Optional[bool] = None + """If the value is `false`, it means the operation is still in progress. If `true`, the operation + is completed, and either `error` or `response` is available.""" + + error: Optional[DatabricksServiceExceptionWithDetailsProto] = None + """The error result of the operation in case of failure or cancellation.""" + + metadata: Optional[dict] = None + """Service-specific metadata associated with the operation. It typically contains progress + information and common metadata such as create time. Some services might not provide such + metadata.""" + + name: Optional[str] = None + """The server-assigned name, which is only unique within the same service that originally returns + it. If you use the default HTTP mapping, the `name` should be a resource name ending with + `operations/{unique_id}`.""" + + response: Optional[dict] = None + """The normal, successful response of the operation.""" + + def as_dict(self) -> dict: + """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error.as_dict() + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Operation into a shallow dictionary of its immediate attributes.""" + body = {} + if self.done is not None: + body["done"] = self.done + if self.error: + body["error"] = self.error + if self.metadata: + body["metadata"] = self.metadata + if self.name is not None: + body["name"] = self.name + if self.response: + body["response"] = self.response + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Operation: + """Deserializes the Operation from a dictionary.""" + return cls( + done=d.get("done", None), + error=_from_dict(d, "error", DatabricksServiceExceptionWithDetailsProto), + metadata=d.get("metadata", None), + name=d.get("name", None), + response=d.get("response", None), + ) + + +class PostgresAPI: + """The Postgres API provides access to a Postgres database via REST API or direct SQL.""" + + def __init__(self, api_client): + self._api = api_client + + def create_database_branch( + self, parent: str, database_branch: DatabaseBranch, *, database_branch_id: Optional[str] = None + ) -> CreateDatabaseBranchOperation: + """Create a Database Branch. + + :param parent: str + The Database Project where this Database Branch will be created. Format: projects/{project_id} + :param database_branch: :class:`DatabaseBranch` + The Database Branch to create. 
+ :param database_branch_id: str (optional) + The ID to use for the Database Branch, which will become the final component of the branch's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + """ + + body = database_branch.as_dict() + query = {} + if database_branch_id is not None: + query["database_branch_id"] = database_branch_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/postgres/{parent}/branches", query=query, body=body, headers=headers) + operation = Operation.from_dict(res) + return CreateDatabaseBranchOperation(self, operation) + + def create_database_endpoint( + self, parent: str, database_endpoint: DatabaseEndpoint, *, database_endpoint_id: Optional[str] = None + ) -> CreateDatabaseEndpointOperation: + """Create a Database Endpoint. + + :param parent: str + The Database Branch where this Database Endpoint will be created. Format: + projects/{project_id}/branches/{branch_id} + :param database_endpoint: :class:`DatabaseEndpoint` + The Database Endpoint to create. + :param database_endpoint_id: str (optional) + The ID to use for the Database Endpoint, which will become the final component of the endpoint's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + """ + + body = database_endpoint.as_dict() + query = {} + if database_endpoint_id is not None: + query["database_endpoint_id"] = database_endpoint_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/postgres/{parent}/endpoints", query=query, body=body, headers=headers) + operation = Operation.from_dict(res) + return CreateDatabaseEndpointOperation(self, operation) + + def create_database_project( + self, database_project: DatabaseProject, *, database_project_id: Optional[str] = None + ) -> CreateDatabaseProjectOperation: + """Create a Database Project. + + :param database_project: :class:`DatabaseProject` + The Database Project to create + :param database_project_id: str (optional) + The ID to use for the Database Project, which will become the final component of the project's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + """ + + body = database_project.as_dict() + query = {} + if database_project_id is not None: + query["database_project_id"] = database_project_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/postgres/projects", query=query, body=body, headers=headers) + operation = Operation.from_dict(res) + return CreateDatabaseProjectOperation(self, operation) + + def delete_database_branch(self, name: str): + """Delete a Database Branch. + + :param name: str + The name of the Database Branch to delete. Format: projects/{project_id}/branches/{branch_id} + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + + def delete_database_endpoint(self, name: str): + """Delete a Database Endpoint. + + :param name: str + The name of the Database Endpoint to delete. 
Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + + def delete_database_project(self, name: str): + """Delete a Database Project. + + :param name: str + The name of the Database Project to delete. Format: projects/{project_id} + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + + def get_database_branch(self, name: str) -> DatabaseBranch: + """Get a Database Branch. + + :param name: str + The name of the Database Branch to retrieve. Format: projects/{project_id}/branches/{branch_id} + + :returns: :class:`DatabaseBranch` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/postgres/{name}", headers=headers) + return DatabaseBranch.from_dict(res) + + def get_database_endpoint(self, name: str) -> DatabaseEndpoint: + """Get a Database Endpoint. + + :param name: str + The name of the Database Endpoint to retrieve. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + + :returns: :class:`DatabaseEndpoint` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/postgres/{name}", headers=headers) + return DatabaseEndpoint.from_dict(res) + + def get_database_operation(self, name: str) -> Operation: + """Get a Database Operation. + + :param name: str + The name of the operation resource. + + :returns: :class:`Operation` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/postgres/{name}", headers=headers) + return Operation.from_dict(res) + + def get_database_project(self, name: str) -> DatabaseProject: + """Get a Database Project. + + :param name: str + The name of the Database Project to retrieve. Format: projects/{project_id} + + :returns: :class:`DatabaseProject` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/postgres/{name}", headers=headers) + return DatabaseProject.from_dict(res) + + def list_database_branches( + self, parent: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[DatabaseBranch]: + """List Database Branches. + + :param parent: str + The Database Project, which owns this collection of branches. Format: projects/{project_id} + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Branches. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseBranch` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", f"/api/2.0/postgres/{parent}/branches", query=query, headers=headers) + if "database_branches" in json: + for v in json["database_branches"]: + yield DatabaseBranch.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def list_database_endpoints( + self, parent: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[DatabaseEndpoint]: + """List Database Endpoints. + + :param parent: str + The Database Branch, which owns this collection of endpoints. 
Format:
+          projects/{project_id}/branches/{branch_id}
+        :param page_size: int (optional)
+          Upper bound for items returned.
+        :param page_token: str (optional)
+          Pagination token to go to the next page of Database Endpoints. Requests first page if absent.
+
+        :returns: Iterator over :class:`DatabaseEndpoint`
+        """
+
+        query = {}
+        if page_size is not None:
+            query["page_size"] = page_size
+        if page_token is not None:
+            query["page_token"] = page_token
+        headers = {
+            "Accept": "application/json",
+        }
+
+        while True:
+            json = self._api.do("GET", f"/api/2.0/postgres/{parent}/endpoints", query=query, headers=headers)
+            if "database_endpoints" in json:
+                for v in json["database_endpoints"]:
+                    yield DatabaseEndpoint.from_dict(v)
+            if "next_page_token" not in json or not json["next_page_token"]:
+                return
+            query["page_token"] = json["next_page_token"]
+
+    def list_database_projects(
+        self, *, page_size: Optional[int] = None, page_token: Optional[str] = None
+    ) -> Iterator[DatabaseProject]:
+        """List Database Projects.
+
+        :param page_size: int (optional)
+          Upper bound for items returned.
+        :param page_token: str (optional)
+          Pagination token to go to the next page of Database Projects. Requests first page if absent.
+
+        :returns: Iterator over :class:`DatabaseProject`
+        """
+
+        query = {}
+        if page_size is not None:
+            query["page_size"] = page_size
+        if page_token is not None:
+            query["page_token"] = page_token
+        headers = {
+            "Accept": "application/json",
+        }
+
+        while True:
+            json = self._api.do("GET", "/api/2.0/postgres/projects", query=query, headers=headers)
+            if "database_projects" in json:
+                for v in json["database_projects"]:
+                    yield DatabaseProject.from_dict(v)
+            if "next_page_token" not in json or not json["next_page_token"]:
+                return
+            query["page_token"] = json["next_page_token"]
+
+    def restart_database_endpoint(self, name: str) -> RestartDatabaseEndpointOperation:
+        """Restart a Database Endpoint.
+
+        :param name: str
+          The name of the Database Endpoint to restart. Format:
+          projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}
+
+        :returns: :class:`Operation`
+        """
+
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        res = self._api.do("POST", f"/api/2.0/postgres/{name}", headers=headers)
+        operation = Operation.from_dict(res)
+        return RestartDatabaseEndpointOperation(self, operation)
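For readers skimming this new PostgresAPI surface, a minimal usage sketch may help. It is illustrative only, not part of the generated patch: it assumes the service is exposed on `WorkspaceClient` as `w.postgres` (the accessor name is an assumption, not shown in this hunk), that the dataclasses accept only the fields given here, and that the IDs are placeholders.

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.postgres import DatabaseBranch, DatabaseProject

    w = WorkspaceClient()

    # Create a project and block until the long-running operation finishes;
    # wait() with no options polls get_database_operation until done.
    op = w.postgres.create_database_project(
        database_project=DatabaseProject(display_name="example-project"),
        database_project_id="example-project",
    )
    project = op.wait()

    # Create a branch under the new project (project.name has the form projects/{project_id}).
    branch_op = w.postgres.create_database_branch(
        parent=project.name,
        database_branch=DatabaseBranch(),
        database_branch_id="example-branch",
    )
    branch = branch_op.wait()

    # List branches in the project; the iterator follows next_page_token transparently.
    for b in w.postgres.list_database_branches(parent=project.name):
        print(b.name)

The `wait()` calls rely on the polling behaviour documented on the operation wrapper classes defined later in this file.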
+
+    def update_database_branch(
+        self, name: str, database_branch: DatabaseBranch, update_mask: str
+    ) -> UpdateDatabaseBranchOperation:
+        """Update a Database Branch.
+
+        :param name: str
+          The resource name of the branch. Format: projects/{project_id}/branches/{branch_id}
+        :param database_branch: :class:`DatabaseBranch`
+          The Database Branch to update.
+
+          The branch's `name` field is used to identify the branch to update. Format:
+          projects/{project_id}/branches/{branch_id}
+        :param update_mask: str
+          The list of fields to update. If unspecified, all fields will be updated when possible.
+
+        :returns: :class:`Operation`
+        """
+
+        body = database_branch.as_dict()
+        query = {}
+        if update_mask is not None:
+            query["update_mask"] = update_mask
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        res = self._api.do("PATCH", f"/api/2.0/postgres/{name}", query=query, body=body, headers=headers)
+        operation = Operation.from_dict(res)
+        return UpdateDatabaseBranchOperation(self, operation)
+
+    def update_database_endpoint(
+        self, name: str, database_endpoint: DatabaseEndpoint, update_mask: str
+    ) -> UpdateDatabaseEndpointOperation:
+        """Update a Database Endpoint.
+
+        :param name: str
+          The resource name of the endpoint. Format:
+          projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}
+        :param database_endpoint: :class:`DatabaseEndpoint`
+          The Database Endpoint to update.
+
+          The endpoint's `name` field is used to identify the endpoint to update. Format:
+          projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id}
+        :param update_mask: str
+          The list of fields to update. If unspecified, all fields will be updated when possible.
+
+        :returns: :class:`Operation`
+        """
+
+        body = database_endpoint.as_dict()
+        query = {}
+        if update_mask is not None:
+            query["update_mask"] = update_mask
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        res = self._api.do("PATCH", f"/api/2.0/postgres/{name}", query=query, body=body, headers=headers)
+        operation = Operation.from_dict(res)
+        return UpdateDatabaseEndpointOperation(self, operation)
+
+    def update_database_project(
+        self, name: str, database_project: DatabaseProject, update_mask: str
+    ) -> UpdateDatabaseProjectOperation:
+        """Update a Database Project.
+
+        :param name: str
+          The resource name of the project. Format: projects/{project_id}
+        :param database_project: :class:`DatabaseProject`
+          The Database Project to update.
+
+          The project's `name` field is used to identify the project to update. Format: projects/{project_id}
+        :param update_mask: str
+          The list of fields to update. If unspecified, all fields will be updated when possible.
+
+        :returns: :class:`Operation`
+        """
+
+        body = database_project.as_dict()
+        query = {}
+        if update_mask is not None:
+            query["update_mask"] = update_mask
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        res = self._api.do("PATCH", f"/api/2.0/postgres/{name}", query=query, body=body, headers=headers)
+        operation = Operation.from_dict(res)
+        return UpdateDatabaseProjectOperation(self, operation)
+
+
+class CreateDatabaseBranchOperation:
+    """Long-running operation for create_database_branch"""
+
+    def __init__(self, impl: PostgresAPI, operation: Operation):
+        self._impl = impl
+        self._operation = operation
+
+    def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseBranch:
+        """Wait blocks until the long-running operation is completed. If no timeout is
+        specified, this will poll indefinitely. If a timeout is provided and the operation
+        didn't finish within the timeout, this function will raise an error of type
+        TimeoutError, otherwise returns successful response and any errors encountered.
+ + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseBranch` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_branch = DatabaseBranch.from_dict(operation.response) + + return database_branch, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseBranchOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseBranchOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseBranchOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class CreateDatabaseEndpointOperation: + """Long-running operation for create_database_endpoint""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseEndpoint: + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseEndpoint` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. 
+ if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_endpoint = DatabaseEndpoint.from_dict(operation.response) + + return database_endpoint, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseEndpointOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseEndpointOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseEndpointOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class CreateDatabaseProjectOperation: + """Long-running operation for create_database_project""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseProject: + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseProject` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_project = DatabaseProject.from_dict(operation.response) + + return database_project, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseProjectOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. 
+ + :returns: :class:`DatabaseProjectOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseProjectOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class RestartDatabaseEndpointOperation: + """Long-running operation for restart_database_endpoint""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseEndpoint: + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseEndpoint` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_endpoint = DatabaseEndpoint.from_dict(operation.response) + + return database_endpoint, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseEndpointOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseEndpointOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseEndpointOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class UpdateDatabaseBranchOperation: + """Long-running operation for update_database_branch""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseBranch: + """Wait blocks until the long-running operation is completed. 
If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseBranch` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_branch = DatabaseBranch.from_dict(operation.response) + + return database_branch, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseBranchOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseBranchOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseBranchOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class UpdateDatabaseEndpointOperation: + """Long-running operation for update_database_endpoint""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseEndpoint: + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. 
+ + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseEndpoint` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_endpoint = DatabaseEndpoint.from_dict(operation.response) + + return database_endpoint, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseEndpointOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseEndpointOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseEndpointOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class UpdateDatabaseProjectOperation: + """Long-running operation for update_database_project""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None) -> DatabaseProject: + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`DatabaseProject` + """ + + def poll_operation(): + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. 
+ if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + database_project = DatabaseProject.from_dict(operation.response) + + return database_project, None + + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> DatabaseProjectOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`DatabaseProjectOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return DatabaseProjectOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_database_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done diff --git a/databricks/sdk/service/qualitymonitorv2.py b/databricks/sdk/service/qualitymonitorv2.py index 322989fc9..2a2120aac 100755 --- a/databricks/sdk/service/qualitymonitorv2.py +++ b/databricks/sdk/service/qualitymonitorv2.py @@ -17,6 +17,9 @@ @dataclass class AnomalyDetectionConfig: + job_type: Optional[AnomalyDetectionJobType] = None + """The type of the last run of the workflow.""" + last_run_id: Optional[str] = None """Run id of the last run of the workflow""" @@ -26,6 +29,8 @@ class AnomalyDetectionConfig: def as_dict(self) -> dict: """Serializes the AnomalyDetectionConfig into a dictionary suitable for use as a JSON request body.""" body = {} + if self.job_type is not None: + body["job_type"] = self.job_type.value if self.last_run_id is not None: body["last_run_id"] = self.last_run_id if self.latest_run_status is not None: @@ -35,6 +40,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the AnomalyDetectionConfig into a shallow dictionary of its immediate attributes.""" body = {} + if self.job_type is not None: + body["job_type"] = self.job_type if self.last_run_id is not None: body["last_run_id"] = self.last_run_id if self.latest_run_status is not None: @@ -45,11 +52,18 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> AnomalyDetectionConfig: """Deserializes the AnomalyDetectionConfig from a dictionary.""" return cls( + job_type=_enum(d, "job_type", AnomalyDetectionJobType), last_run_id=d.get("last_run_id", None), latest_run_status=_enum(d, "latest_run_status", AnomalyDetectionRunStatus), ) +class AnomalyDetectionJobType(Enum): + + ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN = "ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN" + ANOMALY_DETECTION_JOB_TYPE_NORMAL = "ANOMALY_DETECTION_JOB_TYPE_NORMAL" + + class AnomalyDetectionRunStatus(Enum): """Status of Anomaly Detection Job Run""" diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index c6126a23e..e95900fae 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -846,6 +846,10 @@ class CreatePrivateEndpointRule: """The full target AWS endpoint service name that connects to the destination resources of the private endpoint.""" + error_message: 
Optional[str] = None + + gcp_endpoint_spec: Optional[GcpEndpointSpec] = None + group_id: Optional[str] = None """Not used by customer-managed private endpoint services. @@ -869,6 +873,10 @@ def as_dict(self) -> dict: body["domain_names"] = [v for v in self.domain_names] if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec.as_dict() if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: @@ -884,6 +892,10 @@ def as_shallow_dict(self) -> dict: body["domain_names"] = self.domain_names if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: @@ -898,6 +910,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CreatePrivateEndpointRule: return cls( domain_names=d.get("domain_names", None), endpoint_service=d.get("endpoint_service", None), + error_message=d.get("error_message", None), + gcp_endpoint_spec=_from_dict(d, "gcp_endpoint_spec", GcpEndpointSpec), group_id=d.get("group_id", None), resource_id=d.get("resource_id", None), resource_names=d.get("resource_names", None), @@ -1180,6 +1194,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerFacingNetworkConnectivityConfig class CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -2880,6 +2896,41 @@ def from_dict(cls, d: Dict[str, Any]) -> FetchIpAccessListResponse: return cls(ip_access_list=_from_dict(d, "ip_access_list", IpAccessListInfo)) +@dataclass +class GcpEndpointSpec: + psc_endpoint_uri: Optional[str] = None + """Output only. The URI of the created PSC endpoint.""" + + service_attachment: Optional[str] = None + """The full url of the target service attachment. 
Example: + projects/my-gcp-project/regions/us-east4/serviceAttachments/my-service-attachment""" + + def as_dict(self) -> dict: + """Serializes the GcpEndpointSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.psc_endpoint_uri is not None: + body["psc_endpoint_uri"] = self.psc_endpoint_uri + if self.service_attachment is not None: + body["service_attachment"] = self.service_attachment + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GcpEndpointSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.psc_endpoint_uri is not None: + body["psc_endpoint_uri"] = self.psc_endpoint_uri + if self.service_attachment is not None: + body["service_attachment"] = self.service_attachment + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GcpEndpointSpec: + """Deserializes the GcpEndpointSpec from a dictionary.""" + return cls( + psc_endpoint_uri=d.get("psc_endpoint_uri", None), service_attachment=d.get("service_attachment", None) + ) + + @dataclass class GenericWebhookConfig: password: Optional[str] = None @@ -3825,6 +3876,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzurePrivateEndpointRule: class NccAzurePrivateEndpointRuleConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -3926,6 +3979,8 @@ class NccEgressDefaultRules: azure_service_endpoint_rule: Optional[NccAzureServiceEndpointRule] = None + gcp_project_id_rule: Optional[NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule] = None + def as_dict(self) -> dict: """Serializes the NccEgressDefaultRules into a dictionary suitable for use as a JSON request body.""" body = {} @@ -3933,6 +3988,8 @@ def as_dict(self) -> dict: body["aws_stable_ip_rule"] = self.aws_stable_ip_rule.as_dict() if self.azure_service_endpoint_rule: body["azure_service_endpoint_rule"] = self.azure_service_endpoint_rule.as_dict() + if self.gcp_project_id_rule: + body["gcp_project_id_rule"] = self.gcp_project_id_rule.as_dict() return body def as_shallow_dict(self) -> dict: @@ -3942,6 +3999,8 @@ def as_shallow_dict(self) -> dict: body["aws_stable_ip_rule"] = self.aws_stable_ip_rule if self.azure_service_endpoint_rule: body["azure_service_endpoint_rule"] = self.azure_service_endpoint_rule + if self.gcp_project_id_rule: + body["gcp_project_id_rule"] = self.gcp_project_id_rule return body @classmethod @@ -3950,6 +4009,9 @@ def from_dict(cls, d: Dict[str, Any]) -> NccEgressDefaultRules: return cls( aws_stable_ip_rule=_from_dict(d, "aws_stable_ip_rule", NccAwsStableIpRule), azure_service_endpoint_rule=_from_dict(d, "azure_service_endpoint_rule", NccAzureServiceEndpointRule), + gcp_project_id_rule=_from_dict( + d, "gcp_project_id_rule", NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule + ), ) @@ -4037,6 +4099,10 @@ class NccPrivateEndpointRule: """The full target AWS endpoint service name that connects to the destination resources of the private endpoint.""" + error_message: Optional[str] = None + + gcp_endpoint_spec: Optional[GcpEndpointSpec] = None + group_id: Optional[str] = None """Not used by customer-managed private endpoint services. 
@@ -4087,6 +4153,10 @@ def as_dict(self) -> dict: body["endpoint_name"] = self.endpoint_name if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec.as_dict() if self.group_id is not None: body["group_id"] = self.group_id if self.network_connectivity_config_id is not None: @@ -4124,6 +4194,10 @@ def as_shallow_dict(self) -> dict: body["endpoint_name"] = self.endpoint_name if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec if self.group_id is not None: body["group_id"] = self.group_id if self.network_connectivity_config_id is not None: @@ -4153,6 +4227,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: enabled=d.get("enabled", None), endpoint_name=d.get("endpoint_name", None), endpoint_service=d.get("endpoint_service", None), + error_message=d.get("error_message", None), + gcp_endpoint_spec=_from_dict(d, "gcp_endpoint_spec", GcpEndpointSpec), group_id=d.get("group_id", None), network_connectivity_config_id=d.get("network_connectivity_config_id", None), resource_id=d.get("resource_id", None), @@ -4174,6 +4250,32 @@ class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): REJECTED = "REJECTED" +@dataclass +class NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule: + project_ids: Optional[List[str]] = None + """A list of Databricks internal project IDs from where network access originates for serverless + DBSQL, This list is stable and will not change once the NCC object is created.""" + + def as_dict(self) -> dict: + """Serializes the NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.project_ids: + body["project_ids"] = [v for v in self.project_ids] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.project_ids: + body["project_ids"] = self.project_ids + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule: + """Deserializes the NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule from a dictionary.""" + return cls(project_ids=d.get("project_ids", None)) + + @dataclass class NetworkConnectivityConfiguration: """Properties of the new network connectivity configuration.""" @@ -4495,6 +4597,12 @@ class PublicTokenInfo: expiry_time: Optional[int] = None """Server time (in epoch milliseconds) when the token will expire, or -1 if not applicable.""" + last_accessed_time: Optional[int] = None + """Server time (in epoch milliseconds) when the token was accessed most recently.""" + + scopes: Optional[List[str]] = None + """Scope of the token was created with, if applicable.""" + token_id: Optional[str] = None """The ID of this token.""" @@ -4507,6 +4615,10 @@ def as_dict(self) -> dict: body["creation_time"] = self.creation_time if self.expiry_time is not None: body["expiry_time"] = self.expiry_time + if self.last_accessed_time is not None: + body["last_accessed_time"] = self.last_accessed_time + if self.scopes: + 
body["scopes"] = [v for v in self.scopes] if self.token_id is not None: body["token_id"] = self.token_id return body @@ -4520,6 +4632,10 @@ def as_shallow_dict(self) -> dict: body["creation_time"] = self.creation_time if self.expiry_time is not None: body["expiry_time"] = self.expiry_time + if self.last_accessed_time is not None: + body["last_accessed_time"] = self.last_accessed_time + if self.scopes: + body["scopes"] = self.scopes if self.token_id is not None: body["token_id"] = self.token_id return body @@ -4531,6 +4647,8 @@ def from_dict(cls, d: Dict[str, Any]) -> PublicTokenInfo: comment=d.get("comment", None), creation_time=d.get("creation_time", None), expiry_time=d.get("expiry_time", None), + last_accessed_time=d.get("last_accessed_time", None), + scopes=d.get("scopes", None), token_id=d.get("token_id", None), ) @@ -4907,6 +5025,9 @@ class TokenInfo: owner_id: Optional[int] = None """User ID of the user that owns the token.""" + scopes: Optional[List[str]] = None + """Scope of the token was created with, if applicable.""" + token_id: Optional[str] = None """ID of the token.""" @@ -4930,6 +5051,8 @@ def as_dict(self) -> dict: body["last_used_day"] = self.last_used_day if self.owner_id is not None: body["owner_id"] = self.owner_id + if self.scopes: + body["scopes"] = [v for v in self.scopes] if self.token_id is not None: body["token_id"] = self.token_id if self.workspace_id is not None: @@ -4953,6 +5076,8 @@ def as_shallow_dict(self) -> dict: body["last_used_day"] = self.last_used_day if self.owner_id is not None: body["owner_id"] = self.owner_id + if self.scopes: + body["scopes"] = self.scopes if self.token_id is not None: body["token_id"] = self.token_id if self.workspace_id is not None: @@ -4970,6 +5095,7 @@ def from_dict(cls, d: Dict[str, Any]) -> TokenInfo: expiry_time=d.get("expiry_time", None), last_used_day=d.get("last_used_day", None), owner_id=d.get("owner_id", None), + scopes=d.get("scopes", None), token_id=d.get("token_id", None), workspace_id=d.get("workspace_id", None), ) @@ -5122,6 +5248,10 @@ class UpdatePrivateEndpointRule: Update this field to activate/deactivate this private endpoint to allow egress access from serverless compute resources.""" + error_message: Optional[str] = None + + gcp_endpoint_spec: Optional[GcpEndpointSpec] = None + resource_names: Optional[List[str]] = None """Only used by private endpoints towards AWS S3 service. 
@@ -5136,6 +5266,10 @@ def as_dict(self) -> dict: body["domain_names"] = [v for v in self.domain_names] if self.enabled is not None: body["enabled"] = self.enabled + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec.as_dict() if self.resource_names: body["resource_names"] = [v for v in self.resource_names] return body @@ -5147,6 +5281,10 @@ def as_shallow_dict(self) -> dict: body["domain_names"] = self.domain_names if self.enabled is not None: body["enabled"] = self.enabled + if self.error_message is not None: + body["error_message"] = self.error_message + if self.gcp_endpoint_spec: + body["gcp_endpoint_spec"] = self.gcp_endpoint_spec if self.resource_names: body["resource_names"] = self.resource_names return body @@ -5157,6 +5295,8 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdatePrivateEndpointRule: return cls( domain_names=d.get("domain_names", None), enabled=d.get("enabled", None), + error_message=d.get("error_message", None), + gcp_endpoint_spec=_from_dict(d, "gcp_endpoint_spec", GcpEndpointSpec), resource_names=d.get("resource_names", None), ) @@ -8582,7 +8722,13 @@ class TokensAPI: def __init__(self, api_client): self._api = api_client - def create(self, *, comment: Optional[str] = None, lifetime_seconds: Optional[int] = None) -> CreateTokenResponse: + def create( + self, + *, + comment: Optional[str] = None, + lifetime_seconds: Optional[int] = None, + scopes: Optional[List[str]] = None, + ) -> CreateTokenResponse: """Creates and returns a token for a user. If this call is made through token authentication, it creates a token with the same client ID as the authenticated token. If the user's token quota is exceeded, this call returns an error **QUOTA_EXCEEDED**. @@ -8593,6 +8739,8 @@ def create(self, *, comment: Optional[str] = None, lifetime_seconds: Optional[in The lifetime of the token, in seconds. If the lifetime is not specified, this token remains valid indefinitely. + :param scopes: List[str] (optional) + Optional scopes of the token. :returns: :class:`CreateTokenResponse` """ @@ -8602,6 +8750,8 @@ def create(self, *, comment: Optional[str] = None, lifetime_seconds: Optional[in body["comment"] = comment if lifetime_seconds is not None: body["lifetime_seconds"] = lifetime_seconds + if scopes is not None: + body["scopes"] = [v for v in scopes] headers = { "Accept": "application/json", "Content-Type": "application/json", diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index bc2d78bc4..ba70ed120 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -1124,6 +1124,15 @@ class PermissionsChange: """The principal whose privileges we are changing. Only one of principal or principal_id should be specified, never both at the same time.""" + principal_id: Optional[int] = None + """An opaque internal ID that identifies the principal whose privileges should be removed. + + This field is intended for removing privileges associated with a deleted user. When set, only + the entries specified in the remove field are processed; any entries in the add field will be + rejected. 
+ + Only one of principal or principal_id should be specified, never both at the same time.""" + remove: Optional[List[str]] = None """The set of privileges to remove.""" @@ -1134,6 +1143,8 @@ def as_dict(self) -> dict: body["add"] = [v for v in self.add] if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.remove: body["remove"] = [v for v in self.remove] return body @@ -1145,6 +1156,8 @@ def as_shallow_dict(self) -> dict: body["add"] = self.add if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.remove: body["remove"] = self.remove return body @@ -1152,7 +1165,12 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> PermissionsChange: """Deserializes the PermissionsChange from a dictionary.""" - return cls(add=d.get("add", None), principal=d.get("principal", None), remove=d.get("remove", None)) + return cls( + add=d.get("add", None), + principal=d.get("principal", None), + principal_id=d.get("principal_id", None), + remove=d.get("remove", None), + ) class Privilege(Enum): @@ -1210,6 +1228,10 @@ class PrivilegeAssignment: """The principal (user email address or group name). For deleted principals, `principal` is empty while `principal_id` is populated.""" + principal_id: Optional[int] = None + """Unique identifier of the principal. For active principals, both `principal` and `principal_id` + are present.""" + privileges: Optional[List[Privilege]] = None """The privileges assigned to the principal.""" @@ -1218,6 +1240,8 @@ def as_dict(self) -> dict: body = {} if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.privileges: body["privileges"] = [v.value for v in self.privileges] return body @@ -1227,6 +1251,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.principal is not None: body["principal"] = self.principal + if self.principal_id is not None: + body["principal_id"] = self.principal_id if self.privileges: body["privileges"] = self.privileges return body @@ -1234,7 +1260,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> PrivilegeAssignment: """Deserializes the PrivilegeAssignment from a dictionary.""" - return cls(principal=d.get("principal", None), privileges=_repeated_enum(d, "privileges", Privilege)) + return cls( + principal=d.get("principal", None), + principal_id=d.get("principal_id", None), + privileges=_repeated_enum(d, "privileges", Privilege), + ) @dataclass @@ -1857,6 +1887,13 @@ class ShareInfo: owner: Optional[str] = None """Username of current owner of share.""" + replication_enabled: Optional[bool] = None + """Whether replication is enabled for this share.""" + + serverless_budget_policy_id: Optional[str] = None + """Serverless budget policy id (can only be created/updated when calling data-sharing service) + [Create,Update:IGN]""" + storage_location: Optional[str] = None """Storage Location URL (full path) for the share.""" @@ -1884,6 +1921,10 @@ def as_dict(self) -> dict: body["objects"] = [v.as_dict() for v in self.objects] if self.owner is not None: body["owner"] = self.owner + if self.replication_enabled is not None: + body["replication_enabled"] = self.replication_enabled + if self.serverless_budget_policy_id is not None: + body["serverless_budget_policy_id"] = 
self.serverless_budget_policy_id if self.storage_location is not None: body["storage_location"] = self.storage_location if self.storage_root is not None: @@ -1909,6 +1950,10 @@ def as_shallow_dict(self) -> dict: body["objects"] = self.objects if self.owner is not None: body["owner"] = self.owner + if self.replication_enabled is not None: + body["replication_enabled"] = self.replication_enabled + if self.serverless_budget_policy_id is not None: + body["serverless_budget_policy_id"] = self.serverless_budget_policy_id if self.storage_location is not None: body["storage_location"] = self.storage_location if self.storage_root is not None: @@ -1929,6 +1974,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ShareInfo: name=d.get("name", None), objects=_repeated_dict(d, "objects", SharedDataObject), owner=d.get("owner", None), + replication_enabled=d.get("replication_enabled", None), + serverless_budget_policy_id=d.get("serverless_budget_policy_id", None), storage_location=d.get("storage_location", None), storage_root=d.get("storage_root", None), updated_at=d.get("updated_at", None), @@ -2972,6 +3019,45 @@ def list( return query["page_token"] = json["next_page_token"] + def update( + self, recipient_name: str, name: str, policy: FederationPolicy, *, update_mask: Optional[str] = None + ) -> FederationPolicy: + """Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the + recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being updated. + :param name: str + Name of the policy. This is the name of the current name of the policy. + :param policy: :class:`FederationPolicy` + :param update_mask: str (optional) + The field mask specifies which fields of the policy to update. To specify multiple fields in the + field mask, use comma as the separator (no space). The special value '*' indicates that all fields + should be updated (full replacement). If unspecified, all fields that are set in the policy provided + in the update request will overwrite the corresponding fields in the existing policy. Example value: + 'comment,oidc_policy.audiences'. + + :returns: :class:`FederationPolicy` + """ + + body = policy.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/data-sharing/recipients/{recipient_name}/federation-policies/{name}", + query=query, + body=body, + headers=headers, + ) + return FederationPolicy.from_dict(res) + class RecipientsAPI: """A recipient is an object you create using :method:recipients/create to represent an organization which you @@ -3269,7 +3355,15 @@ class SharesAPI: def __init__(self, api_client): self._api = api_client - def create(self, name: str, *, comment: Optional[str] = None, storage_root: Optional[str] = None) -> ShareInfo: + def create( + self, + name: str, + *, + comment: Optional[str] = None, + replication_enabled: Optional[bool] = None, + serverless_budget_policy_id: Optional[str] = None, + storage_root: Optional[str] = None, + ) -> ShareInfo: """Creates a new share for data objects. Data objects can be added after creation with **update**. The caller must be a metastore admin or have the **CREATE_SHARE** privilege on the metastore. @@ -3277,6 +3371,11 @@ def create(self, name: str, *, comment: Optional[str] = None, storage_root: Opti Name of the share. 
:param comment: str (optional) User-provided free-form text description. + :param replication_enabled: bool (optional) + Whether replication is enabled for this share. + :param serverless_budget_policy_id: str (optional) + Serverless budget policy id (can only be created/updated when calling data-sharing service) + [Create,Update:IGN] :param storage_root: str (optional) Storage root URL for the share. @@ -3288,6 +3387,10 @@ def create(self, name: str, *, comment: Optional[str] = None, storage_root: Opti body["comment"] = comment if name is not None: body["name"] = name + if replication_enabled is not None: + body["replication_enabled"] = replication_enabled + if serverless_budget_policy_id is not None: + body["serverless_budget_policy_id"] = serverless_budget_policy_id if storage_root is not None: body["storage_root"] = storage_root headers = { @@ -3415,6 +3518,7 @@ def update( comment: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None, + serverless_budget_policy_id: Optional[str] = None, storage_root: Optional[str] = None, updates: Optional[List[SharedDataObjectUpdate]] = None, ) -> ShareInfo: @@ -3442,6 +3546,9 @@ def update( New name for the share. :param owner: str (optional) Username of current owner of share. + :param serverless_budget_policy_id: str (optional) + Serverless budget policy id (can only be created/updated when calling data-sharing service) + [Create,Update:IGN] :param storage_root: str (optional) Storage root URL for the share. :param updates: List[:class:`SharedDataObjectUpdate`] (optional) @@ -3457,6 +3564,8 @@ def update( body["new_name"] = new_name if owner is not None: body["owner"] = owner + if serverless_budget_policy_id is not None: + body["serverless_budget_policy_id"] = serverless_budget_policy_id if storage_root is not None: body["storage_root"] = storage_root if updates is not None: diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 2b839f687..25a9cf6e9 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -664,6 +664,9 @@ class AlertV2: custom_summary: Optional[str] = None """Custom summary for the alert. support mustache template.""" + effective_parent_path: Optional[str] = None + """The actual workspace path of the folder containing the alert. This is an output-only field.""" + effective_run_as: Optional[AlertV2RunAs] = None """The actual identity that will be used to execute the alert. 
This is an output-only field that shows the resolved run-as identity after applying permissions and defaults.""" @@ -708,6 +711,8 @@ def as_dict(self) -> dict: body["custom_summary"] = self.custom_summary if self.display_name is not None: body["display_name"] = self.display_name + if self.effective_parent_path is not None: + body["effective_parent_path"] = self.effective_parent_path if self.effective_run_as: body["effective_run_as"] = self.effective_run_as.as_dict() if self.evaluation: @@ -745,6 +750,8 @@ def as_shallow_dict(self) -> dict: body["custom_summary"] = self.custom_summary if self.display_name is not None: body["display_name"] = self.display_name + if self.effective_parent_path is not None: + body["effective_parent_path"] = self.effective_parent_path if self.effective_run_as: body["effective_run_as"] = self.effective_run_as if self.evaluation: @@ -779,6 +786,7 @@ def from_dict(cls, d: Dict[str, Any]) -> AlertV2: custom_description=d.get("custom_description", None), custom_summary=d.get("custom_summary", None), display_name=d.get("display_name", None), + effective_parent_path=d.get("effective_parent_path", None), effective_run_as=_from_dict(d, "effective_run_as", AlertV2RunAs), evaluation=_from_dict(d, "evaluation", AlertV2Evaluation), id=d.get("id", None), @@ -950,6 +958,7 @@ class AlertV2OperandColumn: name: str aggregation: Optional[Aggregation] = None + """If not set, the behavior is equivalent to using `First row` in the UI.""" display: Optional[str] = None @@ -1717,12 +1726,17 @@ class CronSchedule: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.""" + effective_pause_status: Optional[SchedulePauseStatus] = None + """The actual pause status of the schedule. This is an output-only field.""" + pause_status: Optional[SchedulePauseStatus] = None """Indicate whether this schedule is paused or not.""" def as_dict(self) -> dict: """Serializes the CronSchedule into a dictionary suitable for use as a JSON request body.""" body = {} + if self.effective_pause_status is not None: + body["effective_pause_status"] = self.effective_pause_status.value if self.pause_status is not None: body["pause_status"] = self.pause_status.value if self.quartz_cron_schedule is not None: @@ -1734,6 +1748,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the CronSchedule into a shallow dictionary of its immediate attributes.""" body = {} + if self.effective_pause_status is not None: + body["effective_pause_status"] = self.effective_pause_status if self.pause_status is not None: body["pause_status"] = self.pause_status if self.quartz_cron_schedule is not None: @@ -1746,6 +1762,7 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> CronSchedule: """Deserializes the CronSchedule from a dictionary.""" return cls( + effective_pause_status=_enum(d, "effective_pause_status", SchedulePauseStatus), pause_status=_enum(d, "pause_status", SchedulePauseStatus), quartz_cron_schedule=d.get("quartz_cron_schedule", None), timezone_id=d.get("timezone_id", None), diff --git a/databricks/sdk/service/tags.py b/databricks/sdk/service/tags.py index 91bdc27ef..a6711ab41 100755 --- a/databricks/sdk/service/tags.py +++ b/databricks/sdk/service/tags.py @@ -14,6 +14,40 @@ # all definitions in this file are in alphabetical order +@dataclass +class ListTagAssignmentsResponse: + next_page_token: Optional[str] = None + """Pagination token to request the next page of tag assignments""" + + tag_assignments: 
Optional[List[TagAssignment]] = None + + def as_dict(self) -> dict: + """Serializes the ListTagAssignmentsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.tag_assignments: + body["tag_assignments"] = [v.as_dict() for v in self.tag_assignments] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListTagAssignmentsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.tag_assignments: + body["tag_assignments"] = self.tag_assignments + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListTagAssignmentsResponse: + """Deserializes the ListTagAssignmentsResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + tag_assignments=_repeated_dict(d, "tag_assignments", TagAssignment), + ) + + @dataclass class ListTagPoliciesResponse: next_page_token: Optional[str] = None @@ -46,6 +80,57 @@ def from_dict(cls, d: Dict[str, Any]) -> ListTagPoliciesResponse: ) +@dataclass +class TagAssignment: + entity_type: str + """The type of entity to which the tag is assigned. Allowed value is dashboards""" + + entity_id: str + """The identifier of the entity to which the tag is assigned""" + + tag_key: str + """The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed""" + + tag_value: Optional[str] = None + """The value of the tag""" + + def as_dict(self) -> dict: + """Serializes the TagAssignment into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.entity_id is not None: + body["entity_id"] = self.entity_id + if self.entity_type is not None: + body["entity_type"] = self.entity_type + if self.tag_key is not None: + body["tag_key"] = self.tag_key + if self.tag_value is not None: + body["tag_value"] = self.tag_value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TagAssignment into a shallow dictionary of its immediate attributes.""" + body = {} + if self.entity_id is not None: + body["entity_id"] = self.entity_id + if self.entity_type is not None: + body["entity_type"] = self.entity_type + if self.tag_key is not None: + body["tag_key"] = self.tag_key + if self.tag_value is not None: + body["tag_value"] = self.tag_value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TagAssignment: + """Deserializes the TagAssignment from a dictionary.""" + return cls( + entity_id=d.get("entity_id", None), + entity_type=d.get("entity_type", None), + tag_key=d.get("tag_key", None), + tag_value=d.get("tag_value", None), + ) + + @dataclass class TagPolicy: tag_key: str @@ -133,6 +218,154 @@ def from_dict(cls, d: Dict[str, Any]) -> Value: return cls(name=d.get("name", None)) +class TagAssignmentsAPI: + """Manage tag assignments on workspace-scoped objects.""" + + def __init__(self, api_client): + self._api = api_client + + def create_tag_assignment(self, tag_assignment: TagAssignment) -> TagAssignment: + """Create a tag assignment + + :param tag_assignment: :class:`TagAssignment` + + :returns: :class:`TagAssignment` + """ + + body = tag_assignment.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/entity-tag-assignments", body=body, headers=headers) + return TagAssignment.from_dict(res) + + def 
delete_tag_assignment(self, entity_type: str, entity_id: str, tag_key: str): + """Delete a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", f"/api/2.0/entity-tag-assignments/{entity_type}/{entity_id}/tags/{tag_key}", headers=headers + ) + + def get_tag_assignment(self, entity_type: str, entity_id: str, tag_key: str) -> TagAssignment: + """Get a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + + :returns: :class:`TagAssignment` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/entity-tag-assignments/{entity_type}/{entity_id}/tags/{tag_key}", headers=headers + ) + return TagAssignment.from_dict(res) + + def list_tag_assignments( + self, entity_type: str, entity_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[TagAssignment]: + """List the tag assignments for an entity + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param page_size: int (optional) + Optional. Maximum number of tag assignments to return in a single page + :param page_token: str (optional) + Pagination token to go to the next page of tag assignments. Requests first page if absent. + + :returns: Iterator over :class:`TagAssignment` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", f"/api/2.0/entity-tag-assignments/{entity_type}/{entity_id}/tags", query=query, headers=headers + ) + if "tag_assignments" in json: + for v in json["tag_assignments"]: + yield TagAssignment.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def update_tag_assignment( + self, entity_type: str, entity_id: str, tag_key: str, tag_assignment: TagAssignment, update_mask: str + ) -> TagAssignment: + """Update a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + :param tag_assignment: :class:`TagAssignment` + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. 
+ + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`TagAssignment` + """ + + body = tag_assignment.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/entity-tag-assignments/{entity_type}/{entity_id}/tags/{tag_key}", + query=query, + body=body, + headers=headers, + ) + return TagAssignment.from_dict(res) + + class TagPoliciesAPI: """The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag policies can be managed using the [Account Access Control Proxy API]. diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index a0b731ffa..4824db789 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -21,16 +21,66 @@ # all definitions in this file are in alphabetical order +@dataclass +class AdjustedThroughputRequest: + """Adjusted throughput request parameters""" + + concurrency: Optional[float] = None + """Adjusted concurrency (total CPU) for the endpoint""" + + maximum_concurrency_allowed: Optional[float] = None + """Adjusted maximum concurrency allowed for the endpoint""" + + minimal_concurrency_allowed: Optional[float] = None + """Adjusted minimum concurrency allowed for the endpoint""" + + def as_dict(self) -> dict: + """Serializes the AdjustedThroughputRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.concurrency is not None: + body["concurrency"] = self.concurrency + if self.maximum_concurrency_allowed is not None: + body["maximum_concurrency_allowed"] = self.maximum_concurrency_allowed + if self.minimal_concurrency_allowed is not None: + body["minimal_concurrency_allowed"] = self.minimal_concurrency_allowed + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AdjustedThroughputRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.concurrency is not None: + body["concurrency"] = self.concurrency + if self.maximum_concurrency_allowed is not None: + body["maximum_concurrency_allowed"] = self.maximum_concurrency_allowed + if self.minimal_concurrency_allowed is not None: + body["minimal_concurrency_allowed"] = self.minimal_concurrency_allowed + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AdjustedThroughputRequest: + """Deserializes the AdjustedThroughputRequest from a dictionary.""" + return cls( + concurrency=d.get("concurrency", None), + maximum_concurrency_allowed=d.get("maximum_concurrency_allowed", None), + minimal_concurrency_allowed=d.get("minimal_concurrency_allowed", None), + ) + + @dataclass class ColumnInfo: name: Optional[str] = None """Name of the column.""" + type_text: Optional[str] = None + """Data type of the column (e.g., "string", "int", "array")""" + def as_dict(self) -> dict: """Serializes the ColumnInfo into a dictionary suitable for use as a JSON request body.""" body = {} if self.name is not None: body["name"] = self.name + if self.type_text is not None: + body["type_text"] = self.type_text return body def as_shallow_dict(self) -> dict: @@ -38,12 +88,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.name is not None: body["name"] = self.name + if 
self.type_text is not None: + body["type_text"] = self.type_text return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> ColumnInfo: """Deserializes the ColumnInfo from a dictionary.""" - return cls(name=d.get("name", None)) + return cls(name=d.get("name", None), type_text=d.get("type_text", None)) @dataclass @@ -194,6 +246,11 @@ class DeltaSyncVectorIndexSpecRequest: columns from the source table are synced with the index. The primary key column and embedding source column or embedding vector column are always synced.""" + effective_budget_policy_id: Optional[str] = None + """The budget policy id applied to the vector search index""" + + effective_usage_policy_id: Optional[str] = None + embedding_source_columns: Optional[List[EmbeddingSourceColumn]] = None """The columns that contain the embedding source.""" @@ -218,6 +275,10 @@ def as_dict(self) -> dict: body = {} if self.columns_to_sync: body["columns_to_sync"] = [v for v in self.columns_to_sync] + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.embedding_source_columns: body["embedding_source_columns"] = [v.as_dict() for v in self.embedding_source_columns] if self.embedding_vector_columns: @@ -235,6 +296,10 @@ def as_shallow_dict(self) -> dict: body = {} if self.columns_to_sync: body["columns_to_sync"] = self.columns_to_sync + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.embedding_source_columns: body["embedding_source_columns"] = self.embedding_source_columns if self.embedding_vector_columns: @@ -252,6 +317,8 @@ def from_dict(cls, d: Dict[str, Any]) -> DeltaSyncVectorIndexSpecRequest: """Deserializes the DeltaSyncVectorIndexSpecRequest from a dictionary.""" return cls( columns_to_sync=d.get("columns_to_sync", None), + effective_budget_policy_id=d.get("effective_budget_policy_id", None), + effective_usage_policy_id=d.get("effective_usage_policy_id", None), embedding_source_columns=_repeated_dict(d, "embedding_source_columns", EmbeddingSourceColumn), embedding_vector_columns=_repeated_dict(d, "embedding_vector_columns", EmbeddingVectorColumn), embedding_writeback_table=d.get("embedding_writeback_table", None), @@ -262,6 +329,11 @@ def from_dict(cls, d: Dict[str, Any]) -> DeltaSyncVectorIndexSpecRequest: @dataclass class DeltaSyncVectorIndexSpecResponse: + effective_budget_policy_id: Optional[str] = None + """The budget policy id applied to the vector search index""" + + effective_usage_policy_id: Optional[str] = None + embedding_source_columns: Optional[List[EmbeddingSourceColumn]] = None """The columns that contain the embedding source.""" @@ -287,6 +359,10 @@ class DeltaSyncVectorIndexSpecResponse: def as_dict(self) -> dict: """Serializes the DeltaSyncVectorIndexSpecResponse into a dictionary suitable for use as a JSON request body.""" body = {} + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.embedding_source_columns: body["embedding_source_columns"] = [v.as_dict() for v in self.embedding_source_columns] if self.embedding_vector_columns: @@ 
-304,6 +380,10 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the DeltaSyncVectorIndexSpecResponse into a shallow dictionary of its immediate attributes.""" body = {} + if self.effective_budget_policy_id is not None: + body["effective_budget_policy_id"] = self.effective_budget_policy_id + if self.effective_usage_policy_id is not None: + body["effective_usage_policy_id"] = self.effective_usage_policy_id if self.embedding_source_columns: body["embedding_source_columns"] = self.embedding_source_columns if self.embedding_vector_columns: @@ -322,6 +402,8 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> DeltaSyncVectorIndexSpecResponse: """Deserializes the DeltaSyncVectorIndexSpecResponse from a dictionary.""" return cls( + effective_budget_policy_id=d.get("effective_budget_policy_id", None), + effective_usage_policy_id=d.get("effective_usage_policy_id", None), embedding_source_columns=_repeated_dict(d, "embedding_source_columns", EmbeddingSourceColumn), embedding_vector_columns=_repeated_dict(d, "embedding_vector_columns", EmbeddingVectorColumn), embedding_writeback_table=d.get("embedding_writeback_table", None), @@ -486,6 +568,9 @@ class EndpointInfo: num_indexes: Optional[int] = None """Number of indexes on the endpoint""" + throughput_info: Optional[EndpointThroughputInfo] = None + """Throughput information for the endpoint""" + def as_dict(self) -> dict: """Serializes the EndpointInfo into a dictionary suitable for use as a JSON request body.""" body = {} @@ -511,6 +596,8 @@ def as_dict(self) -> dict: body["name"] = self.name if self.num_indexes is not None: body["num_indexes"] = self.num_indexes + if self.throughput_info: + body["throughput_info"] = self.throughput_info.as_dict() return body def as_shallow_dict(self) -> dict: @@ -538,6 +625,8 @@ def as_shallow_dict(self) -> dict: body["name"] = self.name if self.num_indexes is not None: body["num_indexes"] = self.num_indexes + if self.throughput_info: + body["throughput_info"] = self.throughput_info return body @classmethod @@ -555,6 +644,7 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointInfo: last_updated_user=d.get("last_updated_user", None), name=d.get("name", None), num_indexes=d.get("num_indexes", None), + throughput_info=_from_dict(d, "throughput_info", EndpointThroughputInfo), ) @@ -602,6 +692,99 @@ class EndpointStatusState(Enum): YELLOW_STATE = "YELLOW_STATE" +@dataclass +class EndpointThroughputInfo: + """Throughput information for an endpoint""" + + change_request_message: Optional[str] = None + """Additional information about the throughput change request""" + + change_request_state: Optional[ThroughputChangeRequestState] = None + """The state of the most recent throughput change request""" + + current_concurrency: Optional[float] = None + """The current concurrency (total CPU) allocated to the endpoint""" + + current_concurrency_utilization_percentage: Optional[float] = None + """The current utilization of concurrency as a percentage (0-100)""" + + current_num_replicas: Optional[int] = None + """The current number of replicas allocated to the endpoint""" + + maximum_concurrency_allowed: Optional[float] = None + """The maximum concurrency allowed for this endpoint""" + + minimal_concurrency_allowed: Optional[float] = None + """The minimum concurrency allowed for this endpoint""" + + requested_concurrency: Optional[float] = None + """The requested concurrency (total CPU) for the endpoint""" + + requested_num_replicas: Optional[int] = None + """The requested number 
of replicas for the endpoint""" + + def as_dict(self) -> dict: + """Serializes the EndpointThroughputInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.change_request_message is not None: + body["change_request_message"] = self.change_request_message + if self.change_request_state is not None: + body["change_request_state"] = self.change_request_state.value + if self.current_concurrency is not None: + body["current_concurrency"] = self.current_concurrency + if self.current_concurrency_utilization_percentage is not None: + body["current_concurrency_utilization_percentage"] = self.current_concurrency_utilization_percentage + if self.current_num_replicas is not None: + body["current_num_replicas"] = self.current_num_replicas + if self.maximum_concurrency_allowed is not None: + body["maximum_concurrency_allowed"] = self.maximum_concurrency_allowed + if self.minimal_concurrency_allowed is not None: + body["minimal_concurrency_allowed"] = self.minimal_concurrency_allowed + if self.requested_concurrency is not None: + body["requested_concurrency"] = self.requested_concurrency + if self.requested_num_replicas is not None: + body["requested_num_replicas"] = self.requested_num_replicas + return body + + def as_shallow_dict(self) -> dict: + """Serializes the EndpointThroughputInfo into a shallow dictionary of its immediate attributes.""" + body = {} + if self.change_request_message is not None: + body["change_request_message"] = self.change_request_message + if self.change_request_state is not None: + body["change_request_state"] = self.change_request_state + if self.current_concurrency is not None: + body["current_concurrency"] = self.current_concurrency + if self.current_concurrency_utilization_percentage is not None: + body["current_concurrency_utilization_percentage"] = self.current_concurrency_utilization_percentage + if self.current_num_replicas is not None: + body["current_num_replicas"] = self.current_num_replicas + if self.maximum_concurrency_allowed is not None: + body["maximum_concurrency_allowed"] = self.maximum_concurrency_allowed + if self.minimal_concurrency_allowed is not None: + body["minimal_concurrency_allowed"] = self.minimal_concurrency_allowed + if self.requested_concurrency is not None: + body["requested_concurrency"] = self.requested_concurrency + if self.requested_num_replicas is not None: + body["requested_num_replicas"] = self.requested_num_replicas + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> EndpointThroughputInfo: + """Deserializes the EndpointThroughputInfo from a dictionary.""" + return cls( + change_request_message=d.get("change_request_message", None), + change_request_state=_enum(d, "change_request_state", ThroughputChangeRequestState), + current_concurrency=d.get("current_concurrency", None), + current_concurrency_utilization_percentage=d.get("current_concurrency_utilization_percentage", None), + current_num_replicas=d.get("current_num_replicas", None), + maximum_concurrency_allowed=d.get("maximum_concurrency_allowed", None), + minimal_concurrency_allowed=d.get("minimal_concurrency_allowed", None), + requested_concurrency=d.get("requested_concurrency", None), + requested_num_replicas=d.get("requested_num_replicas", None), + ) + + class EndpointType(Enum): """Type of endpoint.""" @@ -737,6 +920,153 @@ def from_dict(cls, d: Dict[str, Any]) -> MapStringValueEntry: return cls(key=d.get("key", None), value=_from_dict(d, "value", Value)) +@dataclass +class Metric: + """Metric specification""" + + labels: 
Optional[List[MetricLabel]] = None + """Metric labels""" + + name: Optional[str] = None + """Metric name""" + + percentile: Optional[float] = None + """Percentile for the metric""" + + def as_dict(self) -> dict: + """Serializes the Metric into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.labels: + body["labels"] = [v.as_dict() for v in self.labels] + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = self.percentile + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Metric into a shallow dictionary of its immediate attributes.""" + body = {} + if self.labels: + body["labels"] = self.labels + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = self.percentile + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Metric: + """Deserializes the Metric from a dictionary.""" + return cls( + labels=_repeated_dict(d, "labels", MetricLabel), + name=d.get("name", None), + percentile=d.get("percentile", None), + ) + + +@dataclass +class MetricLabel: + """Label for a metric""" + + name: Optional[str] = None + """Label name""" + + value: Optional[str] = None + """Label value""" + + def as_dict(self) -> dict: + """Serializes the MetricLabel into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricLabel into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricLabel: + """Deserializes the MetricLabel from a dictionary.""" + return cls(name=d.get("name", None), value=d.get("value", None)) + + +@dataclass +class MetricValue: + """Single metric value at a specific timestamp""" + + timestamp: Optional[int] = None + """Timestamp of the metric value (milliseconds since epoch)""" + + value: Optional[float] = None + """Metric value""" + + def as_dict(self) -> dict: + """Serializes the MetricValue into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricValue into a shallow dictionary of its immediate attributes.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValue: + """Deserializes the MetricValue from a dictionary.""" + return cls(timestamp=d.get("timestamp", None), value=d.get("value", None)) + + +@dataclass +class MetricValues: + """Collection of metric values for a specific metric""" + + metric: Optional[Metric] = None + """Metric specification""" + + values: Optional[List[MetricValue]] = None + """Time series of metric values""" + + def as_dict(self) -> dict: + """Serializes the MetricValues into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric: + body["metric"] = self.metric.as_dict() + if self.values: + body["values"] = [v.as_dict() for v in self.values] + return body + + def 
as_shallow_dict(self) -> dict: + """Serializes the MetricValues into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric: + body["metric"] = self.metric + if self.values: + body["values"] = self.values + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValues: + """Deserializes the MetricValues from a dictionary.""" + return cls(metric=_from_dict(d, "metric", Metric), values=_repeated_dict(d, "values", MetricValue)) + + @dataclass class MiniVectorIndex: creator: Optional[str] = None @@ -820,6 +1150,50 @@ def from_dict(cls, d: Dict[str, Any]) -> PatchEndpointBudgetPolicyResponse: return cls(effective_budget_policy_id=d.get("effective_budget_policy_id", None)) +@dataclass +class PatchEndpointThroughputResponse: + adjusted_request: Optional[AdjustedThroughputRequest] = None + """The adjusted request if the original request could not be fully fulfilled. This is only + populated when the request was adjusted.""" + + message: Optional[str] = None + """Message explaining the status or any adjustments made""" + + status: Optional[ThroughputPatchStatus] = None + """The status of the throughput change request""" + + def as_dict(self) -> dict: + """Serializes the PatchEndpointThroughputResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.adjusted_request: + body["adjusted_request"] = self.adjusted_request.as_dict() + if self.message is not None: + body["message"] = self.message + if self.status is not None: + body["status"] = self.status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PatchEndpointThroughputResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.adjusted_request: + body["adjusted_request"] = self.adjusted_request + if self.message is not None: + body["message"] = self.message + if self.status is not None: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PatchEndpointThroughputResponse: + """Deserializes the PatchEndpointThroughputResponse from a dictionary.""" + return cls( + adjusted_request=_from_dict(d, "adjusted_request", AdjustedThroughputRequest), + message=d.get("message", None), + status=_enum(d, "status", ThroughputPatchStatus), + ) + + class PipelineType(Enum): """Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing after successfully refreshing the source table in the pipeline once, @@ -998,6 +1372,44 @@ def from_dict(cls, d: Dict[str, Any]) -> ResultManifest: return cls(column_count=d.get("column_count", None), columns=_repeated_dict(d, "columns", ColumnInfo)) +@dataclass +class RetrieveUserVisibleMetricsResponse: + """Response containing user-visible metrics""" + + metric_values: Optional[List[MetricValues]] = None + """Collection of metric values""" + + next_page_token: Optional[str] = None + """A token that can be used to get the next page of results. 
If not present, there are no more + results to show.""" + + def as_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric_values: + body["metric_values"] = [v.as_dict() for v in self.metric_values] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric_values: + body["metric_values"] = self.metric_values + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RetrieveUserVisibleMetricsResponse: + """Deserializes the RetrieveUserVisibleMetricsResponse from a dictionary.""" + return cls( + metric_values=_repeated_dict(d, "metric_values", MetricValues), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ScanVectorIndexResponse: """Response to a scan vector index request.""" @@ -1075,6 +1487,25 @@ def from_dict(cls, d: Dict[str, Any]) -> SyncIndexResponse: return cls() +class ThroughputChangeRequestState(Enum): + """Throughput change request state""" + + CHANGE_ADJUSTED = "CHANGE_ADJUSTED" + CHANGE_FAILED = "CHANGE_FAILED" + CHANGE_IN_PROGRESS = "CHANGE_IN_PROGRESS" + CHANGE_REACHED_MAXIMUM = "CHANGE_REACHED_MAXIMUM" + CHANGE_REACHED_MINIMUM = "CHANGE_REACHED_MINIMUM" + CHANGE_SUCCESS = "CHANGE_SUCCESS" + + +class ThroughputPatchStatus(Enum): + """Response status for throughput change requests""" + + PATCH_ACCEPTED = "PATCH_ACCEPTED" + PATCH_FAILED = "PATCH_FAILED" + PATCH_REJECTED = "PATCH_REJECTED" + + @dataclass class UpdateEndpointCustomTagsResponse: custom_tags: Optional[List[CustomTag]] = None @@ -1107,6 +1538,24 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateEndpointCustomTagsResponse: return cls(custom_tags=_repeated_dict(d, "custom_tags", CustomTag), name=d.get("name", None)) +@dataclass +class UpdateVectorIndexUsagePolicyResponse: + def as_dict(self) -> dict: + """Serializes the UpdateVectorIndexUsagePolicyResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateVectorIndexUsagePolicyResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateVectorIndexUsagePolicyResponse: + """Deserializes the UpdateVectorIndexUsagePolicyResponse from a dictionary.""" + return cls() + + @dataclass class UpsertDataResult: failed_primary_keys: Optional[List[str]] = None @@ -1416,7 +1865,13 @@ def wait_get_endpoint_vector_search_endpoint_online( raise TimeoutError(f"timed out after {timeout}: {status_message}") def create_endpoint( - self, name: str, endpoint_type: EndpointType, *, budget_policy_id: Optional[str] = None + self, + name: str, + endpoint_type: EndpointType, + *, + budget_policy_id: Optional[str] = None, + num_replicas: Optional[int] = None, + usage_policy_id: Optional[str] = None, ) -> Wait[EndpointInfo]: """Create a new endpoint. @@ -1426,6 +1881,10 @@ def create_endpoint( Type of endpoint :param budget_policy_id: str (optional) The budget policy id to be applied + :param num_replicas: int (optional) + Initial number of replicas for the endpoint. If not specified, defaults to 1. 
+ :param usage_policy_id: str (optional) + The usage policy id to be applied once we've migrated to usage policies :returns: Long-running operation waiter for :class:`EndpointInfo`. @@ -1439,6 +1898,10 @@ def create_endpoint( body["endpoint_type"] = endpoint_type.value if name is not None: body["name"] = name + if num_replicas is not None: + body["num_replicas"] = num_replicas + if usage_policy_id is not None: + body["usage_policy_id"] = usage_policy_id headers = { "Accept": "application/json", "Content-Type": "application/json", @@ -1457,11 +1920,17 @@ def create_endpoint_and_wait( endpoint_type: EndpointType, *, budget_policy_id: Optional[str] = None, + num_replicas: Optional[int] = None, + usage_policy_id: Optional[str] = None, timeout=timedelta(minutes=20), ) -> EndpointInfo: - return self.create_endpoint(budget_policy_id=budget_policy_id, endpoint_type=endpoint_type, name=name).result( - timeout=timeout - ) + return self.create_endpoint( + budget_policy_id=budget_policy_id, + endpoint_type=endpoint_type, + name=name, + num_replicas=num_replicas, + usage_policy_id=usage_policy_id, + ).result(timeout=timeout) def delete_endpoint(self, endpoint_name: str): """Delete a vector search endpoint. @@ -1519,6 +1988,106 @@ def list_endpoints(self, *, page_token: Optional[str] = None) -> Iterator[Endpoi return query["page_token"] = json["next_page_token"] + def patch_endpoint_throughput( + self, + endpoint_name: str, + *, + all_or_nothing: Optional[bool] = None, + concurrency: Optional[float] = None, + maximum_concurrency_allowed: Optional[float] = None, + minimal_concurrency_allowed: Optional[float] = None, + num_replicas: Optional[int] = None, + ) -> PatchEndpointThroughputResponse: + """Update the throughput (concurrency) of an endpoint + + :param endpoint_name: str + Name of the vector search endpoint + :param all_or_nothing: bool (optional) + If true, the request will fail if the requested concurrency or limits cannot be exactly met. If + false, the request will be adjusted to the closest possible value. + :param concurrency: float (optional) + Requested concurrency (total CPU) for the endpoint. If not specified, the current concurrency is + maintained. + :param maximum_concurrency_allowed: float (optional) + Maximum concurrency allowed for the endpoint. If not specified, the current maximum is maintained. + :param minimal_concurrency_allowed: float (optional) + Minimum concurrency allowed for the endpoint. If not specified, the current minimum is maintained. + :param num_replicas: int (optional) + Requested number of data copies for the endpoint (including primary). For example: num_replicas=2 + means 2 total copies of the data (1 primary + 1 replica). If not specified, the current replication + factor is maintained. Valid range: 1-6 (where 1 = no replication, 6 = 1 primary + 5 replicas). 
+ + :returns: :class:`PatchEndpointThroughputResponse` + """ + + body = {} + if all_or_nothing is not None: + body["all_or_nothing"] = all_or_nothing + if concurrency is not None: + body["concurrency"] = concurrency + if maximum_concurrency_allowed is not None: + body["maximum_concurrency_allowed"] = maximum_concurrency_allowed + if minimal_concurrency_allowed is not None: + body["minimal_concurrency_allowed"] = minimal_concurrency_allowed + if num_replicas is not None: + body["num_replicas"] = num_replicas + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", f"/api/2.0/vector-search/endpoints/{endpoint_name}/throughput", body=body, headers=headers + ) + return PatchEndpointThroughputResponse.from_dict(res) + + def retrieve_user_visible_metrics( + self, + name: str, + *, + end_time: Optional[str] = None, + granularity_in_seconds: Optional[int] = None, + metrics: Optional[List[Metric]] = None, + page_token: Optional[str] = None, + start_time: Optional[str] = None, + ) -> RetrieveUserVisibleMetricsResponse: + """Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + """ + + body = {} + if end_time is not None: + body["end_time"] = end_time + if granularity_in_seconds is not None: + body["granularity_in_seconds"] = granularity_in_seconds + if metrics is not None: + body["metrics"] = [v.as_dict() for v in metrics] + if page_token is not None: + body["page_token"] = page_token + if start_time is not None: + body["start_time"] = start_time + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/vector-search/endpoints/{name}/metrics", body=body, headers=headers) + return RetrieveUserVisibleMetricsResponse.from_dict(res) + def update_endpoint_budget_policy( self, endpoint_name: str, budget_policy_id: str ) -> PatchEndpointBudgetPolicyResponse: @@ -1871,6 +2440,22 @@ def sync_index(self, index_name: str): self._api.do("POST", f"/api/2.0/vector-search/indexes/{index_name}/sync", headers=headers) + def update_index_budget_policy(self, index_name: str) -> UpdateVectorIndexUsagePolicyResponse: + """Update the budget policy of an index + + :param index_name: str + Name of the vector search index + + :returns: :class:`UpdateVectorIndexUsagePolicyResponse` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/vector-search/indexes/{index_name}/usage-policy", headers=headers) + return UpdateVectorIndexUsagePolicyResponse.from_dict(res) + def upsert_data_vector_index(self, index_name: str, inputs_json: str) -> UpsertDataVectorIndexResponse: """Handles the upserting of data into a specified vector index. 
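Taken together, the endpoint additions above (replica counts at creation time, throughput patching, and user-visible metrics) can be exercised as sketched below. This is a minimal, illustrative sketch rather than part of the patch: it assumes a configured WorkspaceClient, the existing w.vector_search_endpoints accessor, and a hypothetical endpoint name; attribute names follow the dataclasses defined earlier in this file.

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import vectorsearch

    w = WorkspaceClient()

    # Create an endpoint with an explicit replica count (defaults to 1 when omitted).
    endpoint = w.vector_search_endpoints.create_endpoint_and_wait(
        name="sdk-example-endpoint",  # hypothetical name
        endpoint_type=vectorsearch.EndpointType.STANDARD,
        num_replicas=2,
    )

    # Request a throughput change; with all_or_nothing=False the service may adjust
    # the requested concurrency to the closest possible value.
    patch = w.vector_search_endpoints.patch_endpoint_throughput(
        endpoint_name=endpoint.name,
        concurrency=8.0,
        num_replicas=2,
        all_or_nothing=False,
    )
    print(patch.as_dict())

    # Retrieve user-visible metrics; all filter parameters are optional.
    metrics = w.vector_search_endpoints.retrieve_user_visible_metrics(name=endpoint.name)
    for values in metrics.metric_values or []:
        print(values.as_dict())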
diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index 7ef18fddb..80da90899 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -397,6 +397,12 @@ class ExportFormat(Enum): SOURCE = "SOURCE" +class ExportOutputs(Enum): + + ALL = "ALL" + NONE = "NONE" + + @dataclass class ExportResponse: """The request field `direct_download` determines whether a JSON response or binary contents are @@ -2614,7 +2620,9 @@ def delete(self, path: str, *, recursive: Optional[bool] = None): self._api.do("POST", "/api/2.0/workspace/delete", body=body, headers=headers) - def export(self, path: str, *, format: Optional[ExportFormat] = None) -> ExportResponse: + def export( + self, path: str, *, format: Optional[ExportFormat] = None, outputs: Optional[ExportOutputs] = None + ) -> ExportResponse: """Exports an object or the contents of an entire directory. If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. @@ -2636,6 +2644,11 @@ def export(self, path: str, *, format: Optional[ExportFormat] = None) -> ExportR Directory exports will not include non-notebook entries. - `R_MARKDOWN`: The notebook is exported to R Markdown format. - `AUTO`: The object or directory is exported depending on the objects type. Directory exports will include notebooks and workspace files. + :param outputs: :class:`ExportOutputs` (optional) + This specifies which cell outputs should be included in the export (if the export format allows it). + If not specified, the behavior is determined by the format. For JUPYTER format, the default is to + include all outputs. This is a public endpoint, but only ALL or NONE is documented publically, + DATABRICKS is internal only :returns: :class:`ExportResponse` """ @@ -2643,6 +2656,8 @@ def export(self, path: str, *, format: Optional[ExportFormat] = None) -> ExportR query = {} if format is not None: query["format"] = format.value + if outputs is not None: + query["outputs"] = outputs.value if path is not None: query["path"] = path headers = { @@ -2658,7 +2673,9 @@ def get_permission_levels( """Gets the permission levels that a user can have on an object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -2681,7 +2698,9 @@ def get_permissions(self, workspace_object_type: str, workspace_object_id: str) parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -2840,7 +2859,9 @@ def set_permissions( object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. 
Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) @@ -2872,7 +2893,9 @@ def update_permissions( parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) diff --git a/docs/account/billing/budget_policy.rst b/docs/account/billing/budget_policy.rst index 3c2cbd92e..2ee10bcea 100644 --- a/docs/account/billing/budget_policy.rst +++ b/docs/account/billing/budget_policy.rst @@ -62,7 +62,7 @@ :returns: Iterator over :class:`BudgetPolicy` - .. py:method:: update(policy_id: str, policy: BudgetPolicy [, limit_config: Optional[LimitConfig]]) -> BudgetPolicy + .. py:method:: update(policy_id: str, policy: BudgetPolicy [, limit_config: Optional[LimitConfig], update_mask: Optional[str]]) -> BudgetPolicy Updates a policy @@ -73,6 +73,8 @@ specified even if not changed. The `policy_id` is used to identify the policy to update. :param limit_config: :class:`LimitConfig` (optional) DEPRECATED. This is redundant field as LimitConfig is part of the BudgetPolicy + :param update_mask: str (optional) + Field mask specifying which fields to update. When not provided, all fields are updated. :returns: :class:`BudgetPolicy` \ No newline at end of file diff --git a/docs/account/billing/index.rst b/docs/account/billing/index.rst index b8b317616..08241184d 100644 --- a/docs/account/billing/index.rst +++ b/docs/account/billing/index.rst @@ -11,4 +11,5 @@ Configure different aspects of Databricks billing and usage. budget_policy budgets log_delivery - usage_dashboards \ No newline at end of file + usage_dashboards + usage_policy \ No newline at end of file diff --git a/docs/account/billing/usage_policy.rst b/docs/account/billing/usage_policy.rst new file mode 100644 index 000000000..95a65c111 --- /dev/null +++ b/docs/account/billing/usage_policy.rst @@ -0,0 +1,69 @@ +``a.usage_policy``: Usage Policy +================================ +.. currentmodule:: databricks.sdk.service.billing + +.. py:class:: UsagePolicyAPI + + A service serves REST API about Usage policies + + .. py:method:: create( [, policy: Optional[UsagePolicy], request_id: Optional[str]]) -> UsagePolicy + + Creates a new usage policy. + + :param policy: :class:`UsagePolicy` (optional) + The policy to create. `policy_id` needs to be empty as it will be generated + :param request_id: str (optional) + A unique identifier for this request. Restricted to 36 ASCII characters. + + :returns: :class:`UsagePolicy` + + + .. py:method:: delete(policy_id: str) + + Deletes a usage policy + + :param policy_id: str + The Id of the policy. + + + + + .. py:method:: get(policy_id: str) -> UsagePolicy + + Retrieves a usage policy by it's ID. + + :param policy_id: str + The Id of the policy. + + :returns: :class:`UsagePolicy` + + + .. 
py:method:: list( [, filter_by: Optional[Filter], page_size: Optional[int], page_token: Optional[str], sort_spec: Optional[SortSpec]]) -> Iterator[UsagePolicy] + + Lists all usage policies. Policies are returned in the alphabetically ascending order of their names. + + :param filter_by: :class:`Filter` (optional) + A filter to apply to the list of policies. + :param page_size: int (optional) + The maximum number of usage policies to return. + :param page_token: str (optional) + A page token, received from a previous `ListUsagePolicies` call. + :param sort_spec: :class:`SortSpec` (optional) + The sort specification. + + :returns: Iterator over :class:`UsagePolicy` + + + .. py:method:: update(policy_id: str, policy: UsagePolicy [, limit_config: Optional[LimitConfig]]) -> UsagePolicy + + Updates a usage policy + + :param policy_id: str + The Id of the policy. This field is generated by Databricks and globally unique. + :param policy: :class:`UsagePolicy` + The policy to update. `creator_user_id` cannot be specified in the request. + :param limit_config: :class:`LimitConfig` (optional) + DEPRECATED. This is redundant field as LimitConfig is part of the UsagePolicy + + :returns: :class:`UsagePolicy` + \ No newline at end of file diff --git a/docs/account/iam/groups_v2.rst b/docs/account/iam/groups_v2.rst index 9a38fb63d..622277161 100644 --- a/docs/account/iam/groups_v2.rst +++ b/docs/account/iam/groups_v2.rst @@ -52,8 +52,9 @@ .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountGroup] Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - all = a.workspace_assignment.list(workspace_id=workspace_id) + all = a.workspace_assignment.list(list=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. diff --git a/docs/account/iamv2/iam_v2.rst b/docs/account/iamv2/iam_v2.rst index ea9c48287..5807db592 100644 --- a/docs/account/iamv2/iam_v2.rst +++ b/docs/account/iamv2/iam_v2.rst @@ -6,6 +6,120 @@ These APIs are used to manage identities and the workspace access of these identities in . + .. py:method:: create_group(group: Group) -> Group + + TODO: Write description later when this method is implemented + + :param group: :class:`Group` + Required. Group to be created in + + :returns: :class:`Group` + + + .. py:method:: create_service_principal(service_principal: ServicePrincipal) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param service_principal: :class:`ServicePrincipal` + Required. 
Service principal to be created in + + :returns: :class:`ServicePrincipal` + + + .. py:method:: create_user(user: User) -> User + + TODO: Write description later when this method is implemented + + :param user: :class:`User` + Required. User to be created in + + :returns: :class:`User` + + + .. py:method:: create_workspace_access_detail(parent: str, workspace_access_detail: WorkspaceAccessDetail) -> WorkspaceAccessDetail + + TODO: Write description later when this method is implemented + + :param parent: str + Required. The parent path for workspace access detail. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. Workspace access detail to be created in . + + :returns: :class:`WorkspaceAccessDetail` + + + .. py:method:: delete_group(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + + + + .. py:method:: delete_service_principal(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + + + + .. py:method:: delete_user(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + + + + .. py:method:: delete_workspace_access_detail(workspace_id: int, principal_id: int) + + TODO: Write description later when this method is implemented + + :param workspace_id: int + The workspace ID where the principal has access. + :param principal_id: int + Required. ID of the principal in Databricks to delete workspace access for. + + + + + .. py:method:: get_group(internal_id: int) -> Group + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + :returns: :class:`Group` + + + .. py:method:: get_service_principal(internal_id: int) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: get_user(internal_id: int) -> User + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + :returns: :class:`User` + + .. py:method:: get_workspace_access_detail(workspace_id: int, principal_id: int [, view: Optional[WorkspaceAccessDetailView]]) -> WorkspaceAccessDetail Returns the access details for a principal in a workspace. Allows for checking access details for any @@ -25,6 +139,60 @@ :returns: :class:`WorkspaceAccessDetail` + .. py:method:: list_groups( [, page_size: Optional[int], page_token: Optional[str]]) -> ListGroupsResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of groups to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListGroups call. Provide this to retrieve the subsequent + page. + + :returns: :class:`ListGroupsResponse` + + + .. py:method:: list_service_principals( [, page_size: Optional[int], page_token: Optional[str]]) -> ListServicePrincipalsResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of service principals to return. 
The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListServicePrincipals call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListServicePrincipalsResponse` + + + .. py:method:: list_users( [, page_size: Optional[int], page_token: Optional[str]]) -> ListUsersResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of users to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListUsers call. Provide this to retrieve the subsequent page. + + :returns: :class:`ListUsersResponse` + + + .. py:method:: list_workspace_access_details(workspace_id: int [, page_size: Optional[int], page_token: Optional[str]]) -> ListWorkspaceAccessDetailsResponse + + TODO: Write description later when this method is implemented + + :param workspace_id: int + The workspace ID for which the workspace access details are being fetched. + :param page_size: int (optional) + The maximum number of workspace access details to return. The service may return fewer than this + value. + :param page_token: str (optional) + A page token, received from a previous ListWorkspaceAccessDetails call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListWorkspaceAccessDetailsResponse` + + .. py:method:: resolve_group(external_id: str) -> ResolveGroupResponse Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it @@ -59,4 +227,62 @@ Required. The external ID of the user in the customer's IdP. :returns: :class:`ResolveUserResponse` + + + .. py:method:: update_group(internal_id: int, group: Group, update_mask: str) -> Group + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + :param group: :class:`Group` + Required. Group to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`Group` + + + .. py:method:: update_service_principal(internal_id: int, service_principal: ServicePrincipal, update_mask: str) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + :param service_principal: :class:`ServicePrincipal` + Required. Service Principal to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: update_user(internal_id: int, user: User, update_mask: str) -> User + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + :param user: :class:`User` + Required. User to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`User` + + + .. py:method:: update_workspace_access_detail(workspace_id: int, principal_id: int, workspace_access_detail: WorkspaceAccessDetail, update_mask: str) -> WorkspaceAccessDetail + + TODO: Write description later when this method is implemented + + :param workspace_id: int + Required. The workspace ID for which the workspace access detail is being updated. + :param principal_id: int + Required. ID of the principal in Databricks. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. 
Workspace access detail to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`WorkspaceAccessDetail` \ No newline at end of file diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index b71c1707e..d63648d58 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - creds = a.credentials.create( + role = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=creds.credentials_id) + a.credentials.delete(credentials_id=role.credentials_id) Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 41a04deb3..25ee5abaa 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -23,13 +23,10 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 320c875e1..e812a06c9 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -366,10 +366,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: EnvVar + :members: + :undoc-members: + .. autoclass:: GetAppPermissionLevelsResponse :members: :undoc-members: +.. autoclass:: GitSource + :members: + :undoc-members: + .. autoclass:: ListAppDeploymentsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index 60f015a7b..e9a753418 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -140,6 +140,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListUsagePoliciesResponse + :members: + :undoc-members: + .. py:class:: LogDeliveryConfigStatus * Log Delivery Status @@ -208,6 +212,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: USAGE_DASHBOARD_TYPE_WORKSPACE :value: "USAGE_DASHBOARD_TYPE_WORKSPACE" +.. autoclass:: UsagePolicy + :members: + :undoc-members: + .. autoclass:: WrappedLogDeliveryConfiguration :members: :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 44209d4b9..ff2b4eec4 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -279,7 +279,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - Next Id: 47 + Next Id: 48 .. 
py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -348,6 +348,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ConversionInfo + :members: + :undoc-members: + +.. py:class:: ConversionInfoState + + .. py:attribute:: COMPLETED + :value: "COMPLETED" + + .. py:attribute:: IN_PROGRESS + :value: "IN_PROGRESS" + .. autoclass:: CreateAccessRequest :members: :undoc-members: @@ -642,6 +654,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DrReplicationInfo + :members: + :undoc-members: + +.. py:class:: DrReplicationStatus + + .. py:attribute:: DR_REPLICATION_STATUS_PRIMARY + :value: "DR_REPLICATION_STATUS_PRIMARY" + + .. py:attribute:: DR_REPLICATION_STATUS_SECONDARY + :value: "DR_REPLICATION_STATUS_SECONDARY" + .. autoclass:: EffectivePermissionsList :members: :undoc-members: @@ -689,6 +713,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: EnvironmentSettings + :members: + :undoc-members: + .. autoclass:: ExternalLineageExternalMetadata :members: :undoc-members: @@ -1500,7 +1528,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271 + Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" @@ -1935,6 +1963,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UcEncryptedToken + :members: + :undoc-members: + .. autoclass:: UnassignResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index c6064252a..8cba5c02a 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -51,6 +51,16 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SPOT_WITH_FALLBACK_AZURE :value: "SPOT_WITH_FALLBACK_AZURE" +.. py:class:: BaseEnvironmentType + + If changed, also update estore/namespaces/defaultbaseenvironments/latest.proto + + .. py:attribute:: CPU + :value: "CPU" + + .. py:attribute:: GPU + :value: "GPU" + .. autoclass:: CancelResponse :members: :undoc-members: @@ -317,6 +327,34 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DefaultBaseEnvironment + :members: + :undoc-members: + +.. autoclass:: DefaultBaseEnvironmentCache + :members: + :undoc-members: + +.. py:class:: DefaultBaseEnvironmentCacheStatus + + .. py:attribute:: CREATED + :value: "CREATED" + + .. py:attribute:: EXPIRED + :value: "EXPIRED" + + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: INVALID + :value: "INVALID" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: REFRESHING + :value: "REFRESHING" + .. autoclass:: DeleteClusterResponse :members: :undoc-members: @@ -831,6 +869,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DEFAULT :value: "DEFAULT" +.. autoclass:: ListDefaultBaseEnvironmentsResponse + :members: + :undoc-members: + .. autoclass:: ListGlobalInitScriptsResponse :members: :undoc-members: @@ -883,6 +925,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. 
autoclass:: MaterializedEnvironment + :members: + :undoc-members: + .. autoclass:: MavenLibrary :members: :undoc-members: @@ -895,6 +941,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NodeTypeFlexibility + :members: + :undoc-members: + .. autoclass:: PendingInstanceError :members: :undoc-members: @@ -923,6 +973,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RefreshDefaultBaseEnvironmentsResponse + :members: + :undoc-members: + .. autoclass:: RemoveResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index df004c847..d9140e9d6 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -12,6 +12,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CancelQueryExecutionResponse + :members: + :undoc-members: + +.. autoclass:: CancelQueryExecutionResponseStatus + :members: + :undoc-members: + .. autoclass:: CronSchedule :members: :undoc-members: @@ -25,6 +33,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DASHBOARD_VIEW_BASIC :value: "DASHBOARD_VIEW_BASIC" +.. autoclass:: Empty + :members: + :undoc-members: + +.. autoclass:: ExecuteQueryResponse + :members: + :undoc-members: + .. autoclass:: GenieAttachment :members: :undoc-members: @@ -54,6 +70,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSITIVE :value: "POSITIVE" +.. autoclass:: GenieGenerateDownloadFullQueryResultResponse + :members: + :undoc-members: + +.. autoclass:: GenieGetDownloadFullQueryResultResponse + :members: + :undoc-members: + .. autoclass:: GenieGetMessageQueryResultResponse :members: :undoc-members: @@ -94,6 +118,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetPublishedDashboardEmbeddedResponse + :members: + :undoc-members: + .. autoclass:: GetPublishedDashboardTokenInfoResponse :members: :undoc-members: @@ -341,6 +369,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUBMITTED :value: "SUBMITTED" +.. autoclass:: PendingStatus + :members: + :undoc-members: + +.. autoclass:: PollQueryStatusResponse + :members: + :undoc-members: + +.. autoclass:: PollQueryStatusResponseData + :members: + :undoc-members: + .. autoclass:: PublishedDashboard :members: :undoc-members: @@ -349,6 +389,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: QueryResponseStatus + :members: + :undoc-members: + +.. py:class:: ResponsePhase + + .. py:attribute:: RESPONSE_PHASE_THINKING + :value: "RESPONSE_PHASE_THINKING" + + .. py:attribute:: RESPONSE_PHASE_VERIFYING + :value: "RESPONSE_PHASE_VERIFYING" + .. autoclass:: Result :members: :undoc-members: @@ -381,10 +433,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SuccessStatus + :members: + :undoc-members: + .. autoclass:: TextAttachment :members: :undoc-members: +.. py:class:: TextAttachmentPurpose + + Purpose/intent of a text attachment + + .. py:attribute:: FOLLOW_UP_QUESTION + :value: "FOLLOW_UP_QUESTION" + .. 
autoclass:: TrashDashboardResponse :members: :undoc-members: @@ -392,3 +455,23 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: UnpublishDashboardResponse :members: :undoc-members: + +.. autoclass:: VerificationMetadata + :members: + :undoc-members: + +.. py:class:: VerificationSection + + Verification workflow section - indicates which stage of verification this attachment belongs to These sections are used for grouping and ordering attachments in the frontend UI + + .. py:attribute:: VERIFICATION_SECTION_FINAL_DECISION + :value: "VERIFICATION_SECTION_FINAL_DECISION" + + .. py:attribute:: VERIFICATION_SECTION_PROPOSED_IMPROVEMENT + :value: "VERIFICATION_SECTION_PROPOSED_IMPROVEMENT" + + .. py:attribute:: VERIFICATION_SECTION_SQL_EXAMPLES_VALIDATION + :value: "VERIFICATION_SECTION_SQL_EXAMPLES_VALIDATION" + + .. py:attribute:: VERIFICATION_SECTION_VERIFICATION_QUERIES + :value: "VERIFICATION_SECTION_VERIFICATION_QUERIES" diff --git a/docs/dbdataclasses/dataquality.rst b/docs/dbdataclasses/dataquality.rst index da8a02369..0b404d565 100644 --- a/docs/dbdataclasses/dataquality.rst +++ b/docs/dbdataclasses/dataquality.rst @@ -42,6 +42,16 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: AnomalyDetectionJobType + + Anomaly Detection job type. + + .. py:attribute:: ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN + :value: "ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN" + + .. py:attribute:: ANOMALY_DETECTION_JOB_TYPE_NORMAL + :value: "ANOMALY_DETECTION_JOB_TYPE_NORMAL" + .. autoclass:: CancelRefreshResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/iamv2.rst b/docs/dbdataclasses/iamv2.rst index b77239ca6..7a4cd6620 100644 --- a/docs/dbdataclasses/iamv2.rst +++ b/docs/dbdataclasses/iamv2.rst @@ -8,6 +8,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListGroupsResponse + :members: + :undoc-members: + +.. autoclass:: ListServicePrincipalsResponse + :members: + :undoc-members: + +.. autoclass:: ListUsersResponse + :members: + :undoc-members: + +.. autoclass:: ListWorkspaceAccessDetailsResponse + :members: + :undoc-members: + .. py:class:: PrincipalType The type of the principal (user/sp/group). diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index 3d3a3af75..64fc83742 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -22,6 +22,7 @@ Dataclasses ml oauth2 pipelines + postgres provisioning qualitymonitorv2 serving diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 04a47acf2..955b38561 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -484,6 +484,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ModelTriggerConfiguration + :members: + :undoc-members: + +.. py:class:: ModelTriggerConfigurationCondition + + .. py:attribute:: MODEL_ALIAS_SET + :value: "MODEL_ALIAS_SET" + + .. py:attribute:: MODEL_CREATED + :value: "MODEL_CREATED" + + .. py:attribute:: MODEL_VERSION_READY + :value: "MODEL_VERSION_READY" + .. 
autoclass:: NotebookOutput :members: :undoc-members: diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 844e66245..f7a3373ff 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -377,6 +377,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetLoggedModelsRequestResponse + :members: + :undoc-members: + .. autoclass:: GetMetricHistoryResponse :members: :undoc-members: @@ -604,6 +608,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: OnlineStoreConfig + :members: + :undoc-members: + .. py:class:: OnlineStoreState .. py:attribute:: AVAILABLE diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index cc2f14411..a46409ab2 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -4,6 +4,14 @@ Delta Live Tables These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.pipelines`` module. .. py:currentmodule:: databricks.sdk.service.pipelines +.. autoclass:: ApplyEnvironmentRequestResponse + :members: + :undoc-members: + +.. autoclass:: ConnectionParameters + :members: + :undoc-members: + .. autoclass:: CreatePipelineResponse :members: :undoc-members: @@ -398,6 +406,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RestorePipelineRequestResponse + :members: + :undoc-members: + +.. autoclass:: RewindDatasetSpec + :members: + :undoc-members: + +.. autoclass:: RewindSpec + :members: + :undoc-members: + .. autoclass:: RunAs :members: :undoc-members: @@ -546,6 +566,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WAITING_FOR_RESOURCES :value: "WAITING_FOR_RESOURCES" +.. py:class:: UpdateMode + + .. py:attribute:: CONTINUOUS + :value: "CONTINUOUS" + + .. py:attribute:: DEFAULT + :value: "DEFAULT" + .. autoclass:: UpdateStateInfo :members: :undoc-members: diff --git a/docs/dbdataclasses/postgres.rst b/docs/dbdataclasses/postgres.rst new file mode 100644 index 000000000..501011b72 --- /dev/null +++ b/docs/dbdataclasses/postgres.rst @@ -0,0 +1,342 @@ +Postgres +======== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.postgres`` module. + +.. py:currentmodule:: databricks.sdk.service.postgres +.. autoclass:: DatabaseBranch + :members: + :undoc-members: + +.. autoclass:: DatabaseBranchOperationMetadata + :members: + :undoc-members: + +.. autoclass:: DatabaseEndpoint + :members: + :undoc-members: + +.. autoclass:: DatabaseEndpointOperationMetadata + :members: + :undoc-members: + +.. py:class:: DatabaseEndpointPoolerMode + + The connection pooler mode. Lakebase supports PgBouncer in `transaction` mode only. + + .. py:attribute:: TRANSACTION + :value: "TRANSACTION" + +.. autoclass:: DatabaseEndpointSettings + :members: + :undoc-members: + +.. py:class:: DatabaseEndpointState + + The state of the compute endpoint + + .. py:attribute:: ACTIVE + :value: "ACTIVE" + + .. py:attribute:: IDLE + :value: "IDLE" + + .. py:attribute:: INIT + :value: "INIT" + +.. py:class:: DatabaseEndpointType + + The compute endpoint type. Either `read_write` or `read_only`. + + .. py:attribute:: READ_ONLY + :value: "READ_ONLY" + + .. py:attribute:: READ_WRITE + :value: "READ_WRITE" + +.. 
autoclass:: DatabaseProject + :members: + :undoc-members: + +.. autoclass:: DatabaseProjectCustomTag + :members: + :undoc-members: + +.. autoclass:: DatabaseProjectDefaultEndpointSettings + :members: + :undoc-members: + +.. autoclass:: DatabaseProjectOperationMetadata + :members: + :undoc-members: + +.. autoclass:: DatabaseProjectSettings + :members: + :undoc-members: + +.. autoclass:: DatabricksServiceExceptionWithDetailsProto + :members: + :undoc-members: + +.. py:class:: ErrorCode + + Legacy definition of the ErrorCode enum. Please keep in sync with api-base/proto/error_code.proto (except status code mapping annotations as this file doesn't have them). Will be removed eventually, pending the ScalaPB 0.4 cleanup. + + .. py:attribute:: ABORTED + :value: "ABORTED" + + .. py:attribute:: ALREADY_EXISTS + :value: "ALREADY_EXISTS" + + .. py:attribute:: BAD_REQUEST + :value: "BAD_REQUEST" + + .. py:attribute:: CANCELLED + :value: "CANCELLED" + + .. py:attribute:: CATALOG_ALREADY_EXISTS + :value: "CATALOG_ALREADY_EXISTS" + + .. py:attribute:: CATALOG_DOES_NOT_EXIST + :value: "CATALOG_DOES_NOT_EXIST" + + .. py:attribute:: CATALOG_NOT_EMPTY + :value: "CATALOG_NOT_EMPTY" + + .. py:attribute:: COULD_NOT_ACQUIRE_LOCK + :value: "COULD_NOT_ACQUIRE_LOCK" + + .. py:attribute:: CUSTOMER_UNAUTHORIZED + :value: "CUSTOMER_UNAUTHORIZED" + + .. py:attribute:: DAC_ALREADY_EXISTS + :value: "DAC_ALREADY_EXISTS" + + .. py:attribute:: DAC_DOES_NOT_EXIST + :value: "DAC_DOES_NOT_EXIST" + + .. py:attribute:: DATA_LOSS + :value: "DATA_LOSS" + + .. py:attribute:: DEADLINE_EXCEEDED + :value: "DEADLINE_EXCEEDED" + + .. py:attribute:: DEPLOYMENT_TIMEOUT + :value: "DEPLOYMENT_TIMEOUT" + + .. py:attribute:: DIRECTORY_NOT_EMPTY + :value: "DIRECTORY_NOT_EMPTY" + + .. py:attribute:: DIRECTORY_PROTECTED + :value: "DIRECTORY_PROTECTED" + + .. py:attribute:: DRY_RUN_FAILED + :value: "DRY_RUN_FAILED" + + .. py:attribute:: ENDPOINT_NOT_FOUND + :value: "ENDPOINT_NOT_FOUND" + + .. py:attribute:: EXTERNAL_LOCATION_ALREADY_EXISTS + :value: "EXTERNAL_LOCATION_ALREADY_EXISTS" + + .. py:attribute:: EXTERNAL_LOCATION_DOES_NOT_EXIST + :value: "EXTERNAL_LOCATION_DOES_NOT_EXIST" + + .. py:attribute:: FEATURE_DISABLED + :value: "FEATURE_DISABLED" + + .. py:attribute:: GIT_CONFLICT + :value: "GIT_CONFLICT" + + .. py:attribute:: GIT_REMOTE_ERROR + :value: "GIT_REMOTE_ERROR" + + .. py:attribute:: GIT_SENSITIVE_TOKEN_DETECTED + :value: "GIT_SENSITIVE_TOKEN_DETECTED" + + .. py:attribute:: GIT_UNKNOWN_REF + :value: "GIT_UNKNOWN_REF" + + .. py:attribute:: GIT_URL_NOT_ON_ALLOW_LIST + :value: "GIT_URL_NOT_ON_ALLOW_LIST" + + .. py:attribute:: INSECURE_PARTNER_RESPONSE + :value: "INSECURE_PARTNER_RESPONSE" + + .. py:attribute:: INTERNAL_ERROR + :value: "INTERNAL_ERROR" + + .. py:attribute:: INVALID_PARAMETER_VALUE + :value: "INVALID_PARAMETER_VALUE" + + .. py:attribute:: INVALID_STATE + :value: "INVALID_STATE" + + .. py:attribute:: INVALID_STATE_TRANSITION + :value: "INVALID_STATE_TRANSITION" + + .. py:attribute:: IO_ERROR + :value: "IO_ERROR" + + .. py:attribute:: IPYNB_FILE_IN_REPO + :value: "IPYNB_FILE_IN_REPO" + + .. py:attribute:: MALFORMED_PARTNER_RESPONSE + :value: "MALFORMED_PARTNER_RESPONSE" + + .. py:attribute:: MALFORMED_REQUEST + :value: "MALFORMED_REQUEST" + + .. py:attribute:: MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST + :value: "MANAGED_RESOURCE_GROUP_DOES_NOT_EXIST" + + .. py:attribute:: MAX_BLOCK_SIZE_EXCEEDED + :value: "MAX_BLOCK_SIZE_EXCEEDED" + + .. py:attribute:: MAX_CHILD_NODE_SIZE_EXCEEDED + :value: "MAX_CHILD_NODE_SIZE_EXCEEDED" + + .. 
py:attribute:: MAX_LIST_SIZE_EXCEEDED + :value: "MAX_LIST_SIZE_EXCEEDED" + + .. py:attribute:: MAX_NOTEBOOK_SIZE_EXCEEDED + :value: "MAX_NOTEBOOK_SIZE_EXCEEDED" + + .. py:attribute:: MAX_READ_SIZE_EXCEEDED + :value: "MAX_READ_SIZE_EXCEEDED" + + .. py:attribute:: METASTORE_ALREADY_EXISTS + :value: "METASTORE_ALREADY_EXISTS" + + .. py:attribute:: METASTORE_DOES_NOT_EXIST + :value: "METASTORE_DOES_NOT_EXIST" + + .. py:attribute:: METASTORE_NOT_EMPTY + :value: "METASTORE_NOT_EMPTY" + + .. py:attribute:: NOT_FOUND + :value: "NOT_FOUND" + + .. py:attribute:: NOT_IMPLEMENTED + :value: "NOT_IMPLEMENTED" + + .. py:attribute:: PARTIAL_DELETE + :value: "PARTIAL_DELETE" + + .. py:attribute:: PERMISSION_DENIED + :value: "PERMISSION_DENIED" + + .. py:attribute:: PERMISSION_NOT_PROPAGATED + :value: "PERMISSION_NOT_PROPAGATED" + + .. py:attribute:: PRINCIPAL_DOES_NOT_EXIST + :value: "PRINCIPAL_DOES_NOT_EXIST" + + .. py:attribute:: PROJECTS_OPERATION_TIMEOUT + :value: "PROJECTS_OPERATION_TIMEOUT" + + .. py:attribute:: PROVIDER_ALREADY_EXISTS + :value: "PROVIDER_ALREADY_EXISTS" + + .. py:attribute:: PROVIDER_DOES_NOT_EXIST + :value: "PROVIDER_DOES_NOT_EXIST" + + .. py:attribute:: PROVIDER_SHARE_NOT_ACCESSIBLE + :value: "PROVIDER_SHARE_NOT_ACCESSIBLE" + + .. py:attribute:: QUOTA_EXCEEDED + :value: "QUOTA_EXCEEDED" + + .. py:attribute:: RECIPIENT_ALREADY_EXISTS + :value: "RECIPIENT_ALREADY_EXISTS" + + .. py:attribute:: RECIPIENT_DOES_NOT_EXIST + :value: "RECIPIENT_DOES_NOT_EXIST" + + .. py:attribute:: REQUEST_LIMIT_EXCEEDED + :value: "REQUEST_LIMIT_EXCEEDED" + + .. py:attribute:: RESOURCE_ALREADY_EXISTS + :value: "RESOURCE_ALREADY_EXISTS" + + .. py:attribute:: RESOURCE_CONFLICT + :value: "RESOURCE_CONFLICT" + + .. py:attribute:: RESOURCE_DOES_NOT_EXIST + :value: "RESOURCE_DOES_NOT_EXIST" + + .. py:attribute:: RESOURCE_EXHAUSTED + :value: "RESOURCE_EXHAUSTED" + + .. py:attribute:: RESOURCE_LIMIT_EXCEEDED + :value: "RESOURCE_LIMIT_EXCEEDED" + + .. py:attribute:: SCHEMA_ALREADY_EXISTS + :value: "SCHEMA_ALREADY_EXISTS" + + .. py:attribute:: SCHEMA_DOES_NOT_EXIST + :value: "SCHEMA_DOES_NOT_EXIST" + + .. py:attribute:: SCHEMA_NOT_EMPTY + :value: "SCHEMA_NOT_EMPTY" + + .. py:attribute:: SEARCH_QUERY_TOO_LONG + :value: "SEARCH_QUERY_TOO_LONG" + + .. py:attribute:: SEARCH_QUERY_TOO_SHORT + :value: "SEARCH_QUERY_TOO_SHORT" + + .. py:attribute:: SERVICE_UNDER_MAINTENANCE + :value: "SERVICE_UNDER_MAINTENANCE" + + .. py:attribute:: SHARE_ALREADY_EXISTS + :value: "SHARE_ALREADY_EXISTS" + + .. py:attribute:: SHARE_DOES_NOT_EXIST + :value: "SHARE_DOES_NOT_EXIST" + + .. py:attribute:: STORAGE_CREDENTIAL_ALREADY_EXISTS + :value: "STORAGE_CREDENTIAL_ALREADY_EXISTS" + + .. py:attribute:: STORAGE_CREDENTIAL_DOES_NOT_EXIST + :value: "STORAGE_CREDENTIAL_DOES_NOT_EXIST" + + .. py:attribute:: TABLE_ALREADY_EXISTS + :value: "TABLE_ALREADY_EXISTS" + + .. py:attribute:: TABLE_DOES_NOT_EXIST + :value: "TABLE_DOES_NOT_EXIST" + + .. py:attribute:: TEMPORARILY_UNAVAILABLE + :value: "TEMPORARILY_UNAVAILABLE" + + .. py:attribute:: UNAUTHENTICATED + :value: "UNAUTHENTICATED" + + .. py:attribute:: UNAVAILABLE + :value: "UNAVAILABLE" + + .. py:attribute:: UNKNOWN + :value: "UNKNOWN" + + .. py:attribute:: UNPARSEABLE_HTTP_ERROR + :value: "UNPARSEABLE_HTTP_ERROR" + + .. py:attribute:: WORKSPACE_TEMPORARILY_UNAVAILABLE + :value: "WORKSPACE_TEMPORARILY_UNAVAILABLE" + +.. autoclass:: ListDatabaseBranchesResponse + :members: + :undoc-members: + +.. autoclass:: ListDatabaseEndpointsResponse + :members: + :undoc-members: + +.. 
autoclass:: ListDatabaseProjectsResponse + :members: + :undoc-members: + +.. autoclass:: Operation + :members: + :undoc-members: diff --git a/docs/dbdataclasses/qualitymonitorv2.rst b/docs/dbdataclasses/qualitymonitorv2.rst index 9f4df6ee6..95e7fbae5 100644 --- a/docs/dbdataclasses/qualitymonitorv2.rst +++ b/docs/dbdataclasses/qualitymonitorv2.rst @@ -8,6 +8,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: AnomalyDetectionJobType + + .. py:attribute:: ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN + :value: "ANOMALY_DETECTION_JOB_TYPE_INTERNAL_HIDDEN" + + .. py:attribute:: ANOMALY_DETECTION_JOB_TYPE_NORMAL + :value: "ANOMALY_DETECTION_JOB_TYPE_NORMAL" + .. py:class:: AnomalyDetectionRunStatus Status of Anomaly Detection Job Run diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index b47a84770..67f27cf12 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -208,6 +208,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -508,6 +514,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GcpEndpointSpec + :members: + :undoc-members: + .. autoclass:: GenericWebhookConfig :members: :undoc-members: @@ -601,6 +611,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccAzurePrivateEndpointRuleConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -662,6 +678,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: REJECTED :value: "REJECTED" +.. autoclass:: NetworkConnectivityConfigEgressConfigDefaultRuleGcpProjectIdRule + :members: + :undoc-members: + .. autoclass:: NetworkConnectivityConfiguration :members: :undoc-members: diff --git a/docs/dbdataclasses/tags.rst b/docs/dbdataclasses/tags.rst index 23eb1d728..02892350f 100644 --- a/docs/dbdataclasses/tags.rst +++ b/docs/dbdataclasses/tags.rst @@ -4,10 +4,18 @@ Tags These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.tags`` module. .. py:currentmodule:: databricks.sdk.service.tags +.. autoclass:: ListTagAssignmentsResponse + :members: + :undoc-members: + .. autoclass:: ListTagPoliciesResponse :members: :undoc-members: +.. autoclass:: TagAssignment + :members: + :undoc-members: + .. autoclass:: TagPolicy :members: :undoc-members: diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index b8bd46536..2f4e8e8ac 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -4,6 +4,10 @@ Vector Search These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.vectorsearch`` module. .. py:currentmodule:: databricks.sdk.service.vectorsearch +.. autoclass:: AdjustedThroughputRequest + :members: + :undoc-members: + .. 
autoclass:: ColumnInfo :members: :undoc-members: @@ -86,6 +90,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: YELLOW_STATE :value: "YELLOW_STATE" +.. autoclass:: EndpointThroughputInfo + :members: + :undoc-members: + .. py:class:: EndpointType Type of endpoint. @@ -109,6 +117,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Metric + :members: + :undoc-members: + +.. autoclass:: MetricLabel + :members: + :undoc-members: + +.. autoclass:: MetricValue + :members: + :undoc-members: + +.. autoclass:: MetricValues + :members: + :undoc-members: + .. autoclass:: MiniVectorIndex :members: :undoc-members: @@ -117,6 +141,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PatchEndpointThroughputResponse + :members: + :undoc-members: + .. py:class:: PipelineType Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing after successfully refreshing the source table in the pipeline once, ensuring the table is updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it arrives in the source table to keep vector index fresh. @@ -147,6 +175,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RetrieveUserVisibleMetricsResponse + :members: + :undoc-members: + .. autoclass:: ScanVectorIndexResponse :members: :undoc-members: @@ -159,10 +191,49 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: ThroughputChangeRequestState + + Throughput change request state + + .. py:attribute:: CHANGE_ADJUSTED + :value: "CHANGE_ADJUSTED" + + .. py:attribute:: CHANGE_FAILED + :value: "CHANGE_FAILED" + + .. py:attribute:: CHANGE_IN_PROGRESS + :value: "CHANGE_IN_PROGRESS" + + .. py:attribute:: CHANGE_REACHED_MAXIMUM + :value: "CHANGE_REACHED_MAXIMUM" + + .. py:attribute:: CHANGE_REACHED_MINIMUM + :value: "CHANGE_REACHED_MINIMUM" + + .. py:attribute:: CHANGE_SUCCESS + :value: "CHANGE_SUCCESS" + +.. py:class:: ThroughputPatchStatus + + Response status for throughput change requests + + .. py:attribute:: PATCH_ACCEPTED + :value: "PATCH_ACCEPTED" + + .. py:attribute:: PATCH_FAILED + :value: "PATCH_FAILED" + + .. py:attribute:: PATCH_REJECTED + :value: "PATCH_REJECTED" + .. autoclass:: UpdateEndpointCustomTagsResponse :members: :undoc-members: +.. autoclass:: UpdateVectorIndexUsagePolicyResponse + :members: + :undoc-members: + .. autoclass:: UpsertDataResult :members: :undoc-members: diff --git a/docs/dbdataclasses/workspace.rst b/docs/dbdataclasses/workspace.rst index 4aebf92c5..1a87dd447 100644 --- a/docs/dbdataclasses/workspace.rst +++ b/docs/dbdataclasses/workspace.rst @@ -78,6 +78,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SOURCE :value: "SOURCE" +.. py:class:: ExportOutputs + + .. py:attribute:: ALL + :value: "ALL" + + .. py:attribute:: NONE + :value: "NONE" + .. 
autoclass:: ExportResponse :members: :undoc-members: diff --git a/docs/gen-client-docs.py b/docs/gen-client-docs.py index 7e907d735..918a88faa 100644 --- a/docs/gen-client-docs.py +++ b/docs/gen-client-docs.py @@ -276,6 +276,11 @@ class Generator: "Data Quality", "Manage data quality monitoring on Unity Catalog objects." ), + Package( + "postgres", + "Postgres", + "Manage Postgres database instances." + ), ] def __init__(self): diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 17297d8dd..01427acc6 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -11,7 +11,7 @@ the workspaces in a Databricks account. Users in different workspaces can share access to the same data, depending on privileges granted centrally in Unity Catalog. - .. py:method:: create(name: str [, comment: Optional[str], connection_name: Optional[str], options: Optional[Dict[str, str]], properties: Optional[Dict[str, str]], provider_name: Optional[str], share_name: Optional[str], storage_root: Optional[str]]) -> CatalogInfo + .. py:method:: create(name: str [, comment: Optional[str], connection_name: Optional[str], conversion_info: Optional[ConversionInfo], dr_replication_info: Optional[DrReplicationInfo], options: Optional[Dict[str, str]], properties: Optional[Dict[str, str]], provider_name: Optional[str], share_name: Optional[str], storage_root: Optional[str]]) -> CatalogInfo Usage: @@ -38,6 +38,10 @@ User-provided free-form text description. :param connection_name: str (optional) The name of the connection to an external data source. + :param conversion_info: :class:`ConversionInfo` (optional) + Status of conversion of FOREIGN catalog to UC Native catalog. + :param dr_replication_info: :class:`DrReplicationInfo` (optional) + Disaster Recovery replication state snapshot. :param options: Dict[str,str] (optional) A map of key-value properties attached to the securable. :param properties: Dict[str,str] (optional) @@ -145,7 +149,7 @@ :returns: Iterator over :class:`CatalogInfo` - .. py:method:: update(name: str [, comment: Optional[str], enable_predictive_optimization: Optional[EnablePredictiveOptimization], isolation_mode: Optional[CatalogIsolationMode], new_name: Optional[str], options: Optional[Dict[str, str]], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> CatalogInfo + .. py:method:: update(name: str [, comment: Optional[str], conversion_info: Optional[ConversionInfo], dr_replication_info: Optional[DrReplicationInfo], enable_predictive_optimization: Optional[EnablePredictiveOptimization], isolation_mode: Optional[CatalogIsolationMode], new_name: Optional[str], options: Optional[Dict[str, str]], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> CatalogInfo Usage: @@ -155,12 +159,13 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, comment="updated") + _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) # cleanup w.catalogs.delete(name=created.name, force=True) @@ -172,6 +177,10 @@ The name of the catalog. :param comment: str (optional) User-provided free-form text description. + :param conversion_info: :class:`ConversionInfo` (optional) + Status of conversion of FOREIGN catalog to UC Native catalog. 
+ :param dr_replication_info: :class:`DrReplicationInfo` (optional) + Disaster Recovery replication state snapshot. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) Whether predictive optimization should be enabled for this object and objects under it. :param isolation_mode: :class:`CatalogIsolationMode` (optional) diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index acfeecd53..0674d749d 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -13,7 +13,7 @@ objects based on cloud storage. Users may create different types of connections with each connection having a unique set of configuration options to support credential management and other settings. - .. py:method:: create(name: str, connection_type: ConnectionType, options: Dict[str, str] [, comment: Optional[str], properties: Optional[Dict[str, str]], read_only: Optional[bool]]) -> ConnectionInfo + .. py:method:: create(name: str, connection_type: ConnectionType, options: Dict[str, str] [, comment: Optional[str], environment_settings: Optional[EnvironmentSettings], properties: Optional[Dict[str, str]], read_only: Optional[bool]]) -> ConnectionInfo Usage: @@ -54,6 +54,8 @@ A map of key-value properties attached to the securable. :param comment: str (optional) User-provided free-form text description. + :param environment_settings: :class:`EnvironmentSettings` (optional) + [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. :param read_only: bool (optional) @@ -153,7 +155,7 @@ :returns: Iterator over :class:`ConnectionInfo` - .. py:method:: update(name: str, options: Dict[str, str] [, new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo + .. py:method:: update(name: str, options: Dict[str, str] [, environment_settings: Optional[EnvironmentSettings], new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo Usage: @@ -196,6 +198,8 @@ Name of the connection. :param options: Dict[str,str] A map of key-value properties attached to the securable. + :param environment_settings: :class:`EnvironmentSettings` (optional) + [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param new_name: str (optional) New name for the connection. :param owner: str (optional) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 612800956..fdf69e38a 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -140,10 +140,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list() + all = w.external_locations.list(catalog.ListExternalLocationsRequest()) Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. 
The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on @@ -190,24 +191,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/grants.rst b/docs/workspace/catalog/grants.rst index 69f2dd6c5..e5d7c110f 100644 --- a/docs/workspace/catalog/grants.rst +++ b/docs/workspace/catalog/grants.rst @@ -14,7 +14,7 @@ within the catalog. Similarly, privileges granted on a schema are inherited by all current and future objects within that schema. - .. py:method:: get(securable_type: str, full_name: str [, max_results: Optional[int], page_token: Optional[str], principal: Optional[str]]) -> GetPermissionsResponse + .. py:method:: get(securable_type: str, full_name: str [, include_deleted_principals: Optional[bool], max_results: Optional[int], page_token: Optional[str], principal: Optional[str]]) -> GetPermissionsResponse Usage: @@ -73,6 +73,8 @@ Type of securable. :param full_name: str Full name of securable. + :param include_deleted_principals: bool (optional) + Optional. If true, also return privilege assignments whose principals have been deleted. :param max_results: int (optional) Specifies the maximum number of privileges to return (page length). Every PrivilegeAssignment present in a single page response is guaranteed to contain all the privileges granted on the diff --git a/docs/workspace/catalog/rfa.rst b/docs/workspace/catalog/rfa.rst index 3019403bb..e5e05073e 100644 --- a/docs/workspace/catalog/rfa.rst +++ b/docs/workspace/catalog/rfa.rst @@ -4,12 +4,10 @@ .. py:class:: RfaAPI - Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations. + These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations. .. 
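A minimal sketch of the new `include_deleted_principals` flag on `w.grants.get` described earlier; the securable names are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Also return privilege assignments whose principals have since been deleted.
    grants = w.grants.get(
        securable_type="catalog",
        full_name="main",
        include_deleted_principals=True,
    )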
py:method:: batch_create_access_requests( [, requests: Optional[List[CreateAccessRequest]]]) -> BatchCreateAccessRequestsResponse diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index d8111141e..2eacfda5e 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(delete=created.name) Creates a new storage credential. diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index b33bef940..8de553fc2 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -156,7 +156,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/compute/instance_pools.rst b/docs/workspace/compute/instance_pools.rst index 8be26a2d6..e48748fdb 100644 --- a/docs/workspace/compute/instance_pools.rst +++ b/docs/workspace/compute/instance_pools.rst @@ -19,7 +19,7 @@ Databricks does not charge DBUs while instances are idle in the pool. Instance provider billing does apply. See pricing. - .. py:method:: create(instance_pool_name: str, node_type_id: str [, aws_attributes: Optional[InstancePoolAwsAttributes], azure_attributes: Optional[InstancePoolAzureAttributes], custom_tags: Optional[Dict[str, str]], disk_spec: Optional[DiskSpec], enable_elastic_disk: Optional[bool], gcp_attributes: Optional[InstancePoolGcpAttributes], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], preloaded_docker_images: Optional[List[DockerImage]], preloaded_spark_versions: Optional[List[str]], remote_disk_throughput: Optional[int], total_initial_remote_disk_size: Optional[int]]) -> CreateInstancePoolResponse + .. py:method:: create(instance_pool_name: str, node_type_id: str [, aws_attributes: Optional[InstancePoolAwsAttributes], azure_attributes: Optional[InstancePoolAzureAttributes], custom_tags: Optional[Dict[str, str]], disk_spec: Optional[DiskSpec], enable_auto_alternate_node_types: Optional[bool], enable_elastic_disk: Optional[bool], gcp_attributes: Optional[InstancePoolGcpAttributes], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], node_type_flexibility: Optional[NodeTypeFlexibility], preloaded_docker_images: Optional[List[DockerImage]], preloaded_spark_versions: Optional[List[str]], remote_disk_throughput: Optional[int], total_initial_remote_disk_size: Optional[int]]) -> CreateInstancePoolResponse Usage: @@ -62,6 +62,9 @@ - Currently, Databricks allows at most 45 custom tags :param disk_spec: :class:`DiskSpec` (optional) Defines the specification of the disks that will be attached to all spark containers. 
+ :param enable_auto_alternate_node_types: bool (optional) + For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids are + enabled. This field should not be true if node_type_flexibility is set. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. In AWS, this feature @@ -81,6 +84,10 @@ upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true. :param preloaded_docker_images: List[:class:`DockerImage`] (optional) Custom Docker Image BYOC :param preloaded_spark_versions: List[str] (optional) @@ -107,7 +114,7 @@ - .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], remote_disk_throughput: Optional[int], total_initial_remote_disk_size: Optional[int]]) + .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], enable_auto_alternate_node_types: Optional[bool], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], node_type_flexibility: Optional[NodeTypeFlexibility], remote_disk_throughput: Optional[int], total_initial_remote_disk_size: Optional[int]]) Usage: @@ -150,6 +157,9 @@ EBS volumes) with these tags in addition to `default_tags`. Notes: - Currently, Databricks allows at most 45 custom tags + :param enable_auto_alternate_node_types: bool (optional) + For pools with node type flexibility (Fleet-V2), whether auto generated alternate node type ids are + enabled. This field should not be true if node_type_flexibility is set. :param idle_instance_autotermination_minutes: int (optional) Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. If not set, the extra pool instances @@ -162,6 +172,10 @@ upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool + :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) + For pools with node type flexibility (Fleet-V2), this object contains the information about the + alternate node type ids to use when attempting to launch a cluster if the node type id is not + available. This field should not be set if enable_auto_alternate_node_types is true. :param remote_disk_throughput: int (optional) If set, what the configurable throughput (in Mb/s) for the remote disk is. Currently only supported for GCP HYPERDISK_BALANCED types. diff --git a/docs/workspace/compute/libraries.rst b/docs/workspace/compute/libraries.rst index dfafea7aa..79c3b116e 100644 --- a/docs/workspace/compute/libraries.rst +++ b/docs/workspace/compute/libraries.rst @@ -41,6 +41,41 @@ :returns: Iterator over :class:`LibraryFullStatus` + .. 
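A minimal sketch of the Fleet-V2 instance pool options described earlier, assuming the documented rule that `enable_auto_alternate_node_types` and `node_type_flexibility` are not set together; the pool name and node type are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Let the service generate alternate node types automatically instead of
    # providing an explicit node_type_flexibility object (the two are mutually exclusive).
    pool = w.instance_pools.create(
        instance_pool_name="fleet-v2-pool",
        node_type_id="i3.xlarge",
        enable_auto_alternate_node_types=True,
        min_idle_instances=0,
        max_capacity=10,
    )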
py:method:: create_default_base_environment(default_base_environment: DefaultBaseEnvironment [, request_id: Optional[str]]) -> DefaultBaseEnvironment + + Create a default base environment within workspaces to define the environment version and a list of + dependencies to be used in serverless notebooks and jobs. This process will asynchronously generate a + cache to optimize dependency resolution. + + :param default_base_environment: :class:`DefaultBaseEnvironment` + :param request_id: str (optional) + A unique identifier for this request. A random UUID is recommended. This request is only idempotent + if a `request_id` is provided. + + :returns: :class:`DefaultBaseEnvironment` + + + .. py:method:: delete_default_base_environment(id: str) + + Delete the default base environment given an ID. The default base environment may be used by + downstream workloads. Please ensure that the deletion is intentional. + + :param id: str + + + + + .. py:method:: get_default_base_environment(id: str [, trace_id: Optional[str]]) -> DefaultBaseEnvironment + + Return the default base environment details for a given ID. + + :param id: str + :param trace_id: str (optional) + Deprecated: use ctx.requestId instead + + :returns: :class:`DefaultBaseEnvironment` + + .. py:method:: install(cluster_id: str, libraries: List[Library]) Add libraries to install on a cluster. The installation is asynchronous; it happens in the background @@ -54,6 +89,26 @@ + .. py:method:: list_default_base_environments( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DefaultBaseEnvironment] + + List default base environments defined in the workspaces for the requested user. + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`DefaultBaseEnvironment` + + + .. py:method:: refresh_default_base_environments(ids: List[str]) + + Refresh the cached default base environments for the given IDs. This process will asynchronously + regenerate the caches. The existing caches remains available until it expires. + + :param ids: List[str] + + + + .. py:method:: uninstall(cluster_id: str, libraries: List[Library]) Set libraries to uninstall from a cluster. The libraries won't be uninstalled until the cluster is @@ -65,4 +120,26 @@ The libraries to uninstall. + + + .. py:method:: update_default_base_environment(id: str, default_base_environment: DefaultBaseEnvironment) -> DefaultBaseEnvironment + + Update the default base environment for the given ID. This process will asynchronously regenerate the + cache. The existing cache remains available until it expires. + + :param id: str + :param default_base_environment: :class:`DefaultBaseEnvironment` + + :returns: :class:`DefaultBaseEnvironment` + + + .. py:method:: update_default_default_base_environment( [, base_environment_type: Optional[BaseEnvironmentType], id: Optional[str]]) -> DefaultBaseEnvironment + + Set the default base environment for the workspace. This marks the specified DBE as the workspace + default. + + :param base_environment_type: :class:`BaseEnvironmentType` (optional) + :param id: str (optional) + + :returns: :class:`DefaultBaseEnvironment` \ No newline at end of file diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index 3ceb286ef..3c018651c 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -29,6 +29,24 @@ .. 
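A minimal sketch of the default base environment calls above, assuming they are exposed on `w.libraries` as the file placement suggests; the ID is a placeholder:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # List the default base environments visible to the calling user.
    for dbe in w.libraries.list_default_base_environments():
        print(dbe)

    # Asynchronously regenerate the dependency cache for selected environments.
    w.libraries.refresh_default_base_environments(ids=["<dbe-id>"])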
py:method:: create_message_and_wait(space_id: str, conversation_id: str, content: str, timeout: datetime.timedelta = 0:20:00) -> GenieMessage + .. py:method:: create_space(warehouse_id: str, serialized_space: str [, description: Optional[str], parent_path: Optional[str], title: Optional[str]]) -> GenieSpace + + Creates a Genie space from a serialized payload. + + :param warehouse_id: str + Warehouse to associate with the new space + :param serialized_space: str + Serialized export model for the space contents + :param description: str (optional) + Optional description + :param parent_path: str (optional) + Parent folder path where the space will be registered + :param title: str (optional) + Optional title override + + :returns: :class:`GenieSpace` + + .. py:method:: delete_conversation(space_id: str, conversation_id: str) Delete a conversation. @@ -87,6 +105,51 @@ :returns: :class:`GenieGetMessageQueryResultResponse` + .. py:method:: generate_download_full_query_result(space_id: str, conversation_id: str, message_id: str, attachment_id: str) -> GenieGenerateDownloadFullQueryResultResponse + + Initiates a new SQL execution and returns a `download_id` that you can use to track the progress of + the download. The query result is stored in an external link and can be retrieved using the [Get + Download Full Query Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + See [Execute Statement](:method:statementexecution/executestatement) for more details. + + :param space_id: str + Genie space ID + :param conversation_id: str + Conversation ID + :param message_id: str + Message ID + :param attachment_id: str + Attachment ID + + :returns: :class:`GenieGenerateDownloadFullQueryResultResponse` + + + .. py:method:: get_download_full_query_result(space_id: str, conversation_id: str, message_id: str, attachment_id: str, download_id: str) -> GenieGetDownloadFullQueryResultResponse + + After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult) and + successfully receiving a `download_id`, use this API to poll the download progress. When the download + is complete, the API returns one or more external links to the query result files. Warning: Databricks + strongly recommends that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. + You must not set an Authorization header in download requests. When using the `EXTERNAL_LINKS` + disposition, Databricks returns presigned URLs that grant temporary access to data. See [Execute + Statement](:method:statementexecution/executestatement) for more details. + + :param space_id: str + Genie space ID + :param conversation_id: str + Conversation ID + :param message_id: str + Message ID + :param attachment_id: str + Attachment ID + :param download_id: str + Download ID. This ID is provided by the [Generate Download + endpoint](:method:genie/generateDownloadFullQueryResult) + + :returns: :class:`GenieGetDownloadFullQueryResultResponse` + + .. py:method:: get_message(space_id: str, conversation_id: str, message_id: str) -> GenieMessage Get message from conversation. @@ -205,7 +268,7 @@ :returns: :class:`GenieListSpacesResponse` - .. py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating) + .. 
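A minimal sketch of the two-step download flow above; the identifiers are placeholders and the `download_id` attribute name is assumed from the description of the generate endpoint:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Start a full query result download for an existing message attachment.
    gen = w.genie.generate_download_full_query_result(
        space_id="<space-id>",
        conversation_id="<conversation-id>",
        message_id="<message-id>",
        attachment_id="<attachment-id>",
    )

    # Poll until complete; the response then carries external links to the result files.
    result = w.genie.get_download_full_query_result(
        space_id="<space-id>",
        conversation_id="<conversation-id>",
        message_id="<message-id>",
        attachment_id="<attachment-id>",
        download_id=gen.download_id,
    )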
py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating [, comment: Optional[str]]) Send feedback for a message. @@ -217,6 +280,8 @@ The ID associated with the message to provide feedback for. :param rating: :class:`GenieFeedbackRating` The rating (POSITIVE, NEGATIVE, or NONE). + :param comment: str (optional) + Optional text feedback that will be stored as a comment. @@ -248,4 +313,22 @@ + .. py:method:: update_space(space_id: str [, description: Optional[str], serialized_space: Optional[str], title: Optional[str], warehouse_id: Optional[str]]) -> GenieSpace + + Updates a Genie space with a serialized payload. + + :param space_id: str + Genie space ID + :param description: str (optional) + Optional description + :param serialized_space: str (optional) + Serialized export model for the space contents (full replacement) + :param title: str (optional) + Optional title override + :param warehouse_id: str (optional) + Optional warehouse override + + :returns: :class:`GenieSpace` + + .. py:method:: wait_get_message_genie_completed(conversation_id: str, message_id: str, space_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[GenieMessage], None]]) -> GenieMessage diff --git a/docs/workspace/dashboards/index.rst b/docs/workspace/dashboards/index.rst index 877891d25..940efa5dd 100644 --- a/docs/workspace/dashboards/index.rst +++ b/docs/workspace/dashboards/index.rst @@ -9,4 +9,5 @@ Manage Lakeview dashboards genie lakeview - lakeview_embedded \ No newline at end of file + lakeview_embedded + query_execution \ No newline at end of file diff --git a/docs/workspace/dashboards/lakeview_embedded.rst b/docs/workspace/dashboards/lakeview_embedded.rst index 80eb5bc48..f48bf63bf 100644 --- a/docs/workspace/dashboards/lakeview_embedded.rst +++ b/docs/workspace/dashboards/lakeview_embedded.rst @@ -6,6 +6,16 @@ Token-based Lakeview APIs for embedding dashboards in external applications. + .. py:method:: get_published_dashboard_embedded(dashboard_id: str) + + Get the current published dashboard within an embedded context. + + :param dashboard_id: str + UUID identifying the published dashboard. + + + + .. py:method:: get_published_dashboard_token_info(dashboard_id: str [, external_value: Optional[str], external_viewer_id: Optional[str]]) -> GetPublishedDashboardTokenInfoResponse Get a required authorization details and scopes of a published dashboard to mint an OAuth token. diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst index 42ba0bb4d..6427132b5 100644 --- a/docs/workspace/database/database.rst +++ b/docs/workspace/database/database.rst @@ -107,15 +107,28 @@ - .. py:method:: delete_synced_database_table(name: str) + .. py:method:: delete_synced_database_table(name: str [, purge_data: Optional[bool]]) Delete a Synced Database Table. :param name: str + :param purge_data: bool (optional) + Optional. When set to true, the actual PostgreSQL table will be dropped from the database. + .. py:method:: failover_database_instance(name: str [, failover_target_database_instance_name: Optional[str]]) -> DatabaseInstance + + Failover the primary node of a Database Instance to a secondary. + + :param name: str + Name of the instance to failover. + :param failover_target_database_instance_name: str (optional) + + :returns: :class:`DatabaseInstance` + + .. py:method:: find_database_instance_by_uid( [, uid: Optional[str]]) -> DatabaseInstance Find a Database Instance by uid. 
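A minimal sketch of the new database instance failover call and the `purge_data` flag described above; the instance and table names are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fail over the primary node of an instance to a secondary.
    instance = w.database.failover_database_instance(name="my-database-instance")

    # Delete a synced table and also drop the backing PostgreSQL table.
    w.database.delete_synced_database_table(
        name="main.default.synced_table",
        purge_data=True,
    )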
@@ -270,6 +283,19 @@ :returns: :class:`DatabaseInstance` + .. py:method:: update_database_instance_role(instance_name: str, name: str, database_instance_role: DatabaseInstanceRole [, database_instance_name: Optional[str]]) -> DatabaseInstanceRole + + Update a role for a Database Instance. + + :param instance_name: str + :param name: str + The name of the role. This is the unique identifier for the role in an instance. + :param database_instance_role: :class:`DatabaseInstanceRole` + :param database_instance_name: str (optional) + + :returns: :class:`DatabaseInstanceRole` + + .. py:method:: update_synced_database_table(name: str, synced_table: SyncedDatabaseTable, update_mask: str) -> SyncedDatabaseTable This API is currently unimplemented, but exposed for Terraform support. diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index b2390ce63..2f95213e2 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me = w.current_user.me() + me2 = w.current_user.me() Get details about the current method caller's identity. diff --git a/docs/workspace/iamv2/workspace_iam_v2.rst b/docs/workspace/iamv2/workspace_iam_v2.rst index 4bc594c67..8fff6c48a 100644 --- a/docs/workspace/iamv2/workspace_iam_v2.rst +++ b/docs/workspace/iamv2/workspace_iam_v2.rst @@ -6,6 +6,116 @@ These APIs are used to manage identities and the workspace access of these identities in . + .. py:method:: create_group_proxy(group: Group) -> Group + + TODO: Write description later when this method is implemented + + :param group: :class:`Group` + Required. Group to be created in + + :returns: :class:`Group` + + + .. py:method:: create_service_principal_proxy(service_principal: ServicePrincipal) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param service_principal: :class:`ServicePrincipal` + Required. Service principal to be created in + + :returns: :class:`ServicePrincipal` + + + .. py:method:: create_user_proxy(user: User) -> User + + TODO: Write description later when this method is implemented + + :param user: :class:`User` + Required. User to be created in + + :returns: :class:`User` + + + .. py:method:: create_workspace_access_detail_local(workspace_access_detail: WorkspaceAccessDetail) -> WorkspaceAccessDetail + + TODO: Write description later when this method is implemented + + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. Workspace access detail to be created in . + + :returns: :class:`WorkspaceAccessDetail` + + + .. py:method:: delete_group_proxy(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + + + + .. py:method:: delete_service_principal_proxy(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + + + + .. py:method:: delete_user_proxy(internal_id: int) + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + + + + .. py:method:: delete_workspace_access_detail_local(principal_id: int) + + TODO: Write description later when this method is implemented + + :param principal_id: int + Required. ID of the principal in Databricks. + + + + + .. 
py:method:: get_group_proxy(internal_id: int) -> Group + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + + :returns: :class:`Group` + + + .. py:method:: get_service_principal_proxy(internal_id: int) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: get_user_proxy(internal_id: int) -> User + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + + :returns: :class:`User` + + .. py:method:: get_workspace_access_detail_local(principal_id: int [, view: Optional[WorkspaceAccessDetailView]]) -> WorkspaceAccessDetail Returns the access details for a principal in the current workspace. Allows for checking access @@ -23,6 +133,58 @@ :returns: :class:`WorkspaceAccessDetail` + .. py:method:: list_groups_proxy( [, page_size: Optional[int], page_token: Optional[str]]) -> ListGroupsResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of groups to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListGroups call. Provide this to retrieve the subsequent + page. + + :returns: :class:`ListGroupsResponse` + + + .. py:method:: list_service_principals_proxy( [, page_size: Optional[int], page_token: Optional[str]]) -> ListServicePrincipalsResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of SPs to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListServicePrincipals call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListServicePrincipalsResponse` + + + .. py:method:: list_users_proxy( [, page_size: Optional[int], page_token: Optional[str]]) -> ListUsersResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of users to return. The service may return fewer than this value. + :param page_token: str (optional) + A page token, received from a previous ListUsers call. Provide this to retrieve the subsequent page. + + :returns: :class:`ListUsersResponse` + + + .. py:method:: list_workspace_access_details_local( [, page_size: Optional[int], page_token: Optional[str]]) -> ListWorkspaceAccessDetailsResponse + + TODO: Write description later when this method is implemented + + :param page_size: int (optional) + The maximum number of workspace access details to return. The service may return fewer than this + value. + :param page_token: str (optional) + A page token, received from a previous ListWorkspaceAccessDetails call. Provide this to retrieve the + subsequent page. + + :returns: :class:`ListWorkspaceAccessDetailsResponse` + + .. py:method:: resolve_group_proxy(external_id: str) -> ResolveGroupResponse Resolves a group with the given external ID from the customer's IdP. If the group does not exist, it @@ -57,4 +219,60 @@ Required. The external ID of the user in the customer's IdP. :returns: :class:`ResolveUserResponse` + + + .. 
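A minimal sketch of the workspace IAM v2 resolve and list calls above; the `w.workspace_iam_v2` accessor is assumed from the service name and the external ID is a placeholder:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Resolve a group from the customer's IdP by its external ID.
    resolved = w.workspace_iam_v2.resolve_group_proxy(external_id="<idp-group-id>")

    # Page through groups; the service may return fewer than page_size entries.
    groups = w.workspace_iam_v2.list_groups_proxy(page_size=50)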
py:method:: update_group_proxy(internal_id: int, group: Group, update_mask: str) -> Group + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the group in Databricks. + :param group: :class:`Group` + Required. Group to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`Group` + + + .. py:method:: update_service_principal_proxy(internal_id: int, service_principal: ServicePrincipal, update_mask: str) -> ServicePrincipal + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the service principal in Databricks. + :param service_principal: :class:`ServicePrincipal` + Required. Service principal to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`ServicePrincipal` + + + .. py:method:: update_user_proxy(internal_id: int, user: User, update_mask: str) -> User + + TODO: Write description later when this method is implemented + + :param internal_id: int + Required. Internal ID of the user in Databricks. + :param user: :class:`User` + Required. User to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`User` + + + .. py:method:: update_workspace_access_detail_local(principal_id: int, workspace_access_detail: WorkspaceAccessDetail, update_mask: str) -> WorkspaceAccessDetail + + TODO: Write description later when this method is implemented + + :param principal_id: int + Required. ID of the principal in Databricks. + :param workspace_access_detail: :class:`WorkspaceAccessDetail` + Required. WorkspaceAccessDetail to be updated in + :param update_mask: str + Optional. The list of fields to update. + + :returns: :class:`WorkspaceAccessDetail` \ No newline at end of file diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index 7f69ccba9..c9c9b27ed 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -23,6 +23,7 @@ These APIs are available from WorkspaceClient ml/index oauth2/index pipelines/index + postgres/index qualitymonitorv2/index serving/index settings/index diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 0b82986de..7c230ce3f 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -10,7 +10,7 @@ scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in @@ -126,7 +126,7 @@ .. py:method:: cancel_run_and_wait(run_id: int, timeout: datetime.timedelta = 0:20:00) -> Run - .. 
py:method:: create( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], performance_target: Optional[PerformanceTarget], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], usage_policy_id: Optional[str], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse + .. py:method:: create( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], parent_path: Optional[str], performance_target: Optional[PerformanceTarget], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], usage_policy_id: Optional[str], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse Usage: @@ -188,9 +188,10 @@ as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. @@ -223,6 +224,9 @@ `email_notifications` and `webhook_notifications` for this job. :param parameters: List[:class:`JobParameterDefinition`] (optional) Job-level parameter definitions + :param parent_path: str (optional) + Path of the job parent folder in workspace file tree. If absent, the job doesn't have a workspace + object. 
:param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. This field determines the level of compute performance or cost-efficiency for the run. @@ -522,37 +526,11 @@ .. code-block:: - import os - import time - from databricks.sdk import WorkspaceClient - from databricks.sdk.service import jobs w = WorkspaceClient() - notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - - cluster_id = ( - w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] - ) - - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", - tasks=[ - jobs.Task( - description="test", - existing_cluster_id=cluster_id, - notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, - ) - ], - ) - - run_list = w.jobs.list_runs(job_id=created_job.job_id) - - # cleanup - w.jobs.delete(job_id=created_job.job_id) + job_list = w.jobs.list(expand_tasks=False) List jobs. diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index 0a514b33c..abc64624b 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -274,6 +274,16 @@ :returns: :class:`GetLoggedModelResponse` + .. py:method:: get_logged_models( [, model_ids: Optional[List[str]]]) -> GetLoggedModelsRequestResponse + + Batch endpoint for getting logged models from a list of model IDs + + :param model_ids: List[str] (optional) + The IDs of the logged models to retrieve. Max threshold is 100. + + :returns: :class:`GetLoggedModelsRequestResponse` + + .. py:method:: get_permission_levels(experiment_id: str) -> GetExperimentPermissionLevelsResponse Gets the permission levels that a user can have on an object. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 98d803a63..601ffd87d 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,7 +90,9 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -734,13 +736,14 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - model = w.model_registry.get_model(name=created.registered_model.name) + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") - w.model_registry.update_model( - name=model.registered_model_databricks.name, + w.model_registry.update_model_version( description=f"sdk-{time.time_ns()}", + name=created.model_version.name, + version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index b0bada615..f27d367f1 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -16,6 +16,16 @@ step. You can also enforce data quality with Spark Declarative Pipelines expectations. Expectations allow you to define expected data quality and specify how to handle records that fail those expectations. + .. 
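A minimal sketch of the batch logged-model lookup on `w.experiments` described earlier; the model IDs are placeholders and at most 100 may be passed per call:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch several logged models in one request instead of one call per model.
    response = w.experiments.get_logged_models(model_ids=["<model-id-1>", "<model-id-2>"])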
py:method:: apply_environment(pipeline_id: str) -> ApplyEnvironmentRequestResponse + + * Applies the current pipeline environment onto the pipeline compute. The environment applied can be + used by subsequent dev-mode updates. + + :param pipeline_id: str + + :returns: :class:`ApplyEnvironmentRequestResponse` + + .. py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], environment: Optional[PipelinesEnvironment], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], tags: Optional[Dict[str, str]], target: Optional[str], trigger: Optional[PipelineTrigger], usage_policy_id: Optional[str]]) -> CreatePipelineResponse @@ -129,8 +139,8 @@ .. py:method:: delete(pipeline_id: str) - Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and - its tables. You cannot undo this action. + Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline deletion will cascade to all + pipeline tables. Please reach out to Databricks support for assistance to undo this action. :param pipeline_id: str @@ -332,6 +342,17 @@ :returns: :class:`ListUpdatesResponse` + .. py:method:: restore_pipeline(pipeline_id: str) -> RestorePipelineRequestResponse + + * Restores a pipeline that was previously deleted, if within the restoration window. All tables + deleted at pipeline deletion will be undropped as well. + + :param pipeline_id: str + The ID of the pipeline to restore + + :returns: :class:`RestorePipelineRequestResponse` + + .. py:method:: set_permissions(pipeline_id: str [, access_control_list: Optional[List[PipelineAccessControlRequest]]]) -> PipelinePermissions Sets permissions on an object, replacing existing permissions if they exist. Deletes all direct @@ -344,7 +365,7 @@ :returns: :class:`PipelinePermissions` - .. py:method:: start_update(pipeline_id: str [, cause: Optional[StartUpdateCause], full_refresh: Optional[bool], full_refresh_selection: Optional[List[str]], refresh_selection: Optional[List[str]], validate_only: Optional[bool]]) -> StartUpdateResponse + .. py:method:: start_update(pipeline_id: str [, cause: Optional[StartUpdateCause], full_refresh: Optional[bool], full_refresh_selection: Optional[List[str]], refresh_selection: Optional[List[str]], rewind_spec: Optional[RewindSpec], validate_only: Optional[bool]]) -> StartUpdateResponse Starts a new update for the pipeline. If there is already an active update for the pipeline, the request will fail and the active update will remain running. @@ -361,6 +382,8 @@ A list of tables to update without fullRefresh. If both refresh_selection and full_refresh_selection are empty, this is a full graph update. Full Refresh on a table means that the states of the table will be reset before the refresh. 
+ :param rewind_spec: :class:`RewindSpec` (optional) + The information about the requested rewind operation. If specified this is a rewind mode update. :param validate_only: bool (optional) If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. diff --git a/docs/workspace/postgres/index.rst b/docs/workspace/postgres/index.rst new file mode 100644 index 000000000..14bd0779f --- /dev/null +++ b/docs/workspace/postgres/index.rst @@ -0,0 +1,10 @@ + +Postgres +======== + +Manage Postgres database instances. + +.. toctree:: + :maxdepth: 1 + + postgres \ No newline at end of file diff --git a/docs/workspace/postgres/postgres.rst b/docs/workspace/postgres/postgres.rst new file mode 100644 index 000000000..3951227e4 --- /dev/null +++ b/docs/workspace/postgres/postgres.rst @@ -0,0 +1,232 @@ +``w.postgres``: Postgres +======================== +.. currentmodule:: databricks.sdk.service.postgres + +.. py:class:: PostgresAPI + + The Postgres API provides access to a Postgres database via REST API or direct SQL. + + .. py:method:: create_database_branch(parent: str, database_branch: DatabaseBranch [, database_branch_id: Optional[str]]) -> CreateDatabaseBranchOperation + + Create a Database Branch. + + :param parent: str + The Database Project where this Database Branch will be created. Format: projects/{project_id} + :param database_branch: :class:`DatabaseBranch` + The Database Branch to create. + :param database_branch_id: str (optional) + The ID to use for the Database Branch, which will become the final component of the branch's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + + + .. py:method:: create_database_endpoint(parent: str, database_endpoint: DatabaseEndpoint [, database_endpoint_id: Optional[str]]) -> CreateDatabaseEndpointOperation + + Create a Database Endpoint. + + :param parent: str + The Database Branch where this Database Endpoint will be created. Format: + projects/{project_id}/branches/{branch_id} + :param database_endpoint: :class:`DatabaseEndpoint` + The Database Endpoint to create. + :param database_endpoint_id: str (optional) + The ID to use for the Database Endpoint, which will become the final component of the endpoint's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + + + .. py:method:: create_database_project(database_project: DatabaseProject [, database_project_id: Optional[str]]) -> CreateDatabaseProjectOperation + + Create a Database Project. + + :param database_project: :class:`DatabaseProject` + The Database Project to create + :param database_project_id: str (optional) + The ID to use for the Database Project, which will become the final component of the project's + resource name. + + This value should be 4-63 characters, and valid characters are /[a-z][0-9]-/. + + :returns: :class:`Operation` + + + .. py:method:: delete_database_branch(name: str) + + Delete a Database Branch. + + :param name: str + The name of the Database Branch to delete. Format: projects/{project_id}/branches/{branch_id} + + + + + .. py:method:: delete_database_endpoint(name: str) + + Delete a Database Endpoint. + + :param name: str + The name of the Database Endpoint to delete. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + + + + + .. py:method:: delete_database_project(name: str) + + Delete a Database Project. 
+ + :param name: str + The name of the Database Project to delete. Format: projects/{project_id} + + + + + .. py:method:: get_database_branch(name: str) -> DatabaseBranch + + Get a Database Branch. + + :param name: str + The name of the Database Branch to retrieve. Format: projects/{project_id}/branches/{branch_id} + + :returns: :class:`DatabaseBranch` + + + .. py:method:: get_database_endpoint(name: str) -> DatabaseEndpoint + + Get a Database Endpoint. + + :param name: str + The name of the Database Endpoint to retrieve. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + + :returns: :class:`DatabaseEndpoint` + + + .. py:method:: get_database_operation(name: str) -> Operation + + Get a Database Operation. + + :param name: str + The name of the operation resource. + + :returns: :class:`Operation` + + + .. py:method:: get_database_project(name: str) -> DatabaseProject + + Get a Database Project. + + :param name: str + The name of the Database Project to retrieve. Format: projects/{project_id} + + :returns: :class:`DatabaseProject` + + + .. py:method:: list_database_branches(parent: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseBranch] + + List Database Branches. + + :param parent: str + The Database Project, which owns this collection of branches. Format: projects/{project_id} + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Branches. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseBranch` + + + .. py:method:: list_database_endpoints(parent: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseEndpoint] + + List Database Endpoints. + + :param parent: str + The Database Branch, which owns this collection of endpoints. Format: + projects/{project_id}/branches/{branch_id} + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Branches. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseEndpoint` + + + .. py:method:: list_database_projects( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseProject] + + List Database Projects. + + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Projects. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseProject` + + + .. py:method:: restart_database_endpoint(name: str) -> RestartDatabaseEndpointOperation + + Restart a Database Endpoint. + + :param name: str + The name of the Database Endpoint to restart. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + + :returns: :class:`Operation` + + + .. py:method:: update_database_branch(name: str, database_branch: DatabaseBranch, update_mask: str) -> UpdateDatabaseBranchOperation + + Update a Database Branch. + + :param name: str + The resource name of the branch. Format: projects/{project_id}/branches/{branch_id} + :param database_branch: :class:`DatabaseBranch` + The Database Branch to update. + + The branch's `name` field is used to identify the branch to update. Format: + projects/{project_id}/branches/{branch_id} + :param update_mask: str + The list of fields to update. If unspecified, all fields will be updated when possible. + + :returns: :class:`Operation` + + + .. 
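A minimal sketch of browsing the Postgres resource hierarchy with the list calls above; the `name` fields are assumed to follow the documented `projects/{project_id}` and `projects/{project_id}/branches/{branch_id}` formats:

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Walk projects, their branches, and each branch's endpoints.
    for project in w.postgres.list_database_projects():
        for branch in w.postgres.list_database_branches(parent=project.name):
            for endpoint in w.postgres.list_database_endpoints(parent=branch.name):
                print(endpoint)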
py:method:: update_database_endpoint(name: str, database_endpoint: DatabaseEndpoint, update_mask: str) -> UpdateDatabaseEndpointOperation + + Update a Database Endpoint. + + :param name: str + The resource name of the endpoint. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + :param database_endpoint: :class:`DatabaseEndpoint` + The Database Endpoint to update. + + The endpoints's `name` field is used to identify the endpoint to update. Format: + projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} + :param update_mask: str + The list of fields to update. If unspecified, all fields will be updated when possible. + + :returns: :class:`Operation` + + + .. py:method:: update_database_project(name: str, database_project: DatabaseProject, update_mask: str) -> UpdateDatabaseProjectOperation + + Update a Database Project. + + :param name: str + The resource name of the project. Format: projects/{project_id} + :param database_project: :class:`DatabaseProject` + The Database Project to update. + + The project's `name` field is used to identify the project to update. Format: projects/{project_id} + :param update_mask: str + The list of fields to update. If unspecified, all fields will be updated when possible. + + :returns: :class:`Operation` + \ No newline at end of file diff --git a/docs/workspace/settings/tokens.rst b/docs/workspace/settings/tokens.rst index 18e7161cc..151dea0b7 100644 --- a/docs/workspace/settings/tokens.rst +++ b/docs/workspace/settings/tokens.rst @@ -7,7 +7,7 @@ The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs. - .. py:method:: create( [, comment: Optional[str], lifetime_seconds: Optional[int]]) -> CreateTokenResponse + .. py:method:: create( [, comment: Optional[str], lifetime_seconds: Optional[int], scopes: Optional[List[str]]]) -> CreateTokenResponse Usage: @@ -35,6 +35,8 @@ The lifetime of the token, in seconds. If the lifetime is not specified, this token remains valid indefinitely. + :param scopes: List[str] (optional) + Optional scopes of the token. :returns: :class:`CreateTokenResponse` diff --git a/docs/workspace/sharing/recipient_federation_policies.rst b/docs/workspace/sharing/recipient_federation_policies.rst index 770f9b1ca..0cdcd8559 100644 --- a/docs/workspace/sharing/recipient_federation_policies.rst +++ b/docs/workspace/sharing/recipient_federation_policies.rst @@ -93,4 +93,24 @@ :param page_token: str (optional) :returns: Iterator over :class:`FederationPolicy` + + + .. py:method:: update(recipient_name: str, name: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy + + Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the + recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being updated. + :param name: str + Name of the policy. This is the name of the current name of the policy. + :param policy: :class:`FederationPolicy` + :param update_mask: str (optional) + The field mask specifies which fields of the policy to update. To specify multiple fields in the + field mask, use comma as the separator (no space). The special value '*' indicates that all fields + should be updated (full replacement). If unspecified, all fields that are set in the policy provided + in the update request will overwrite the corresponding fields in the existing policy. Example value: + 'comment,oidc_policy.audiences'. 
+ + :returns: :class:`FederationPolicy` \ No newline at end of file diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 80c5d11b6..00a7f1b8d 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -9,7 +9,7 @@ register data assets under their original name, qualified by their original schema, or provide alternate exposed names. - .. py:method:: create(name: str [, comment: Optional[str], storage_root: Optional[str]]) -> ShareInfo + .. py:method:: create(name: str [, comment: Optional[str], replication_enabled: Optional[bool], serverless_budget_policy_id: Optional[str], storage_root: Optional[str]]) -> ShareInfo Usage: @@ -34,6 +34,11 @@ Name of the share. :param comment: str (optional) User-provided free-form text description. + :param replication_enabled: bool (optional) + Whether replication is enabled for this share. + :param serverless_budget_policy_id: str (optional) + Serverless budget policy id (can only be created/updated when calling data-sharing service) + [Create,Update:IGN] :param storage_root: str (optional) Storage root URL for the share. @@ -134,7 +139,7 @@ :returns: :class:`GetSharePermissionsResponse` - .. py:method:: update(name: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str], storage_root: Optional[str], updates: Optional[List[SharedDataObjectUpdate]]]) -> ShareInfo + .. py:method:: update(name: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str], serverless_budget_policy_id: Optional[str], storage_root: Optional[str], updates: Optional[List[SharedDataObjectUpdate]]]) -> ShareInfo Usage: @@ -210,6 +215,9 @@ New name for the share. :param owner: str (optional) Username of current owner of share. + :param serverless_budget_policy_id: str (optional) + Serverless budget policy id (can only be created/updated when calling data-sharing service) + [Create,Update:IGN] :param storage_root: str (optional) Storage root URL for the share. :param updates: List[:class:`SharedDataObjectUpdate`] (optional) diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 0dfb63fbf..f0081b3f2 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SELECT 1", + query_text="SHOW TABLES", ) ) diff --git a/docs/workspace/tags/index.rst b/docs/workspace/tags/index.rst index 5489c384d..6074ed574 100644 --- a/docs/workspace/tags/index.rst +++ b/docs/workspace/tags/index.rst @@ -7,4 +7,5 @@ Manage tag policies and tag assignments on workspace objects .. toctree:: :maxdepth: 1 + tag_assignments tag_policies \ No newline at end of file diff --git a/docs/workspace/tags/tag_assignments.rst b/docs/workspace/tags/tag_assignments.rst new file mode 100644 index 000000000..37728c628 --- /dev/null +++ b/docs/workspace/tags/tag_assignments.rst @@ -0,0 +1,85 @@ +``w.tag_assignments``: Tag Assignments +====================================== +.. currentmodule:: databricks.sdk.service.tags + +.. py:class:: TagAssignmentsAPI + + Manage tag assignments on workspace-scoped objects. + + .. py:method:: create_tag_assignment(tag_assignment: TagAssignment) -> TagAssignment + + Create a tag assignment + + :param tag_assignment: :class:`TagAssignment` + + :returns: :class:`TagAssignment` + + + .. 
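A minimal sketch of creating a tag assignment; the `TagAssignment` field names (`entity_type`, `entity_id`, `tag_key`) are assumed from the parameters used by the other methods of this API, and the IDs are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import tags

    w = WorkspaceClient()

    # Assign a tag to a dashboard (dashboards is the allowed entity type).
    assignment = w.tag_assignments.create_tag_assignment(
        tag_assignment=tags.TagAssignment(
            entity_type="dashboards",
            entity_id="<dashboard-id>",
            tag_key="team",
        )
    )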
py:method:: delete_tag_assignment(entity_type: str, entity_id: str, tag_key: str) + + Delete a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + + + + + .. py:method:: get_tag_assignment(entity_type: str, entity_id: str, tag_key: str) -> TagAssignment + + Get a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + + :returns: :class:`TagAssignment` + + + .. py:method:: list_tag_assignments(entity_type: str, entity_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagAssignment] + + List the tag assignments for an entity + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param page_size: int (optional) + Optional. Maximum number of tag assignments to return in a single page + :param page_token: str (optional) + Pagination token to go to the next page of tag assignments. Requests first page if absent. + + :returns: Iterator over :class:`TagAssignment` + + + .. py:method:: update_tag_assignment(entity_type: str, entity_id: str, tag_key: str, tag_assignment: TagAssignment, update_mask: str) -> TagAssignment + + Update a tag assignment + + :param entity_type: str + The type of entity to which the tag is assigned. Allowed value is dashboards + :param entity_id: str + The identifier of the entity to which the tag is assigned + :param tag_key: str + The key of the tag. The characters , . : / - = and leading/trailing spaces are not allowed + :param tag_assignment: :class:`TagAssignment` + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`TagAssignment` + \ No newline at end of file diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index 47a8fa59a..02afc29e0 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -6,7 +6,7 @@ **Endpoint**: Represents the compute resources to host vector search indexes. - .. py:method:: create_endpoint(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str]]) -> Wait[EndpointInfo] + .. 
py:method:: create_endpoint(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str], num_replicas: Optional[int], usage_policy_id: Optional[str]]) -> Wait[EndpointInfo] Create a new endpoint. @@ -16,13 +16,17 @@ Type of endpoint :param budget_policy_id: str (optional) The budget policy id to be applied + :param num_replicas: int (optional) + Initial number of replicas for the endpoint. If not specified, defaults to 1. + :param usage_policy_id: str (optional) + The usage policy id to be applied once we've migrated to usage policies :returns: Long-running operation waiter for :class:`EndpointInfo`. See :method:wait_get_endpoint_vector_search_endpoint_online for more details. - .. py:method:: create_endpoint_and_wait(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> EndpointInfo + .. py:method:: create_endpoint_and_wait(name: str, endpoint_type: EndpointType [, budget_policy_id: Optional[str], num_replicas: Optional[int], usage_policy_id: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> EndpointInfo .. py:method:: delete_endpoint(endpoint_name: str) @@ -55,6 +59,50 @@ :returns: Iterator over :class:`EndpointInfo` + .. py:method:: patch_endpoint_throughput(endpoint_name: str [, all_or_nothing: Optional[bool], concurrency: Optional[float], maximum_concurrency_allowed: Optional[float], minimal_concurrency_allowed: Optional[float], num_replicas: Optional[int]]) -> PatchEndpointThroughputResponse + + Update the throughput (concurrency) of an endpoint + + :param endpoint_name: str + Name of the vector search endpoint + :param all_or_nothing: bool (optional) + If true, the request will fail if the requested concurrency or limits cannot be exactly met. If + false, the request will be adjusted to the closest possible value. + :param concurrency: float (optional) + Requested concurrency (total CPU) for the endpoint. If not specified, the current concurrency is + maintained. + :param maximum_concurrency_allowed: float (optional) + Maximum concurrency allowed for the endpoint. If not specified, the current maximum is maintained. + :param minimal_concurrency_allowed: float (optional) + Minimum concurrency allowed for the endpoint. If not specified, the current minimum is maintained. + :param num_replicas: int (optional) + Requested number of data copies for the endpoint (including primary). For example: num_replicas=2 + means 2 total copies of the data (1 primary + 1 replica). If not specified, the current replication + factor is maintained. Valid range: 1-6 (where 1 = no replication, 6 = 1 primary + 5 replicas). + + :returns: :class:`PatchEndpointThroughputResponse` + + + .. py:method:: retrieve_user_visible_metrics(name: str [, end_time: Optional[str], granularity_in_seconds: Optional[int], metrics: Optional[List[Metric]], page_token: Optional[str], start_time: Optional[str]]) -> RetrieveUserVisibleMetricsResponse + + Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + + .. 
py:method:: update_endpoint_budget_policy(endpoint_name: str, budget_policy_id: str) -> PatchEndpointBudgetPolicyResponse Update the budget policy of an endpoint diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst index c89f3579c..46e48eec6 100644 --- a/docs/workspace/vectorsearch/vector_search_indexes.rst +++ b/docs/workspace/vectorsearch/vector_search_indexes.rst @@ -153,6 +153,16 @@ + .. py:method:: update_index_budget_policy(index_name: str) -> UpdateVectorIndexUsagePolicyResponse + + Update the budget policy of an index + + :param index_name: str + Name of the vector search index + + :returns: :class:`UpdateVectorIndexUsagePolicyResponse` + + .. py:method:: upsert_data_vector_index(index_name: str, inputs_json: str) -> UpsertDataVectorIndexResponse Handles the upserting of data into a specified vector index. diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index e1b7d12b9..8daae9143 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -63,7 +63,7 @@ :return: file-like `io.BinaryIO` of the `path` contents. - .. py:method:: export(path: str [, format: Optional[ExportFormat]]) -> ExportResponse + .. py:method:: export(path: str [, format: Optional[ExportFormat], outputs: Optional[ExportOutputs]]) -> ExportResponse Usage: @@ -79,7 +79,7 @@ notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) + export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. @@ -102,6 +102,11 @@ Directory exports will not include non-notebook entries. - `R_MARKDOWN`: The notebook is exported to R Markdown format. - `AUTO`: The object or directory is exported depending on the objects type. Directory exports will include notebooks and workspace files. + :param outputs: :class:`ExportOutputs` (optional) + This specifies which cell outputs should be included in the export (if the export format allows it). + If not specified, the behavior is determined by the format. For JUPYTER format, the default is to + include all outputs. This is a public endpoint, but only ALL or NONE is documented publically, + DATABRICKS is internal only :returns: :class:`ExportResponse` @@ -111,7 +116,9 @@ Gets the permission levels that a user can have on an object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -124,7 +131,9 @@ parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -263,7 +272,9 @@ object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. 
+ The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) @@ -277,7 +288,9 @@ parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py index 679118220..c06822e8f 100755 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -4,7 +4,6 @@ import logging from dataclasses import dataclass -from datetime import timedelta from enum import Enum from typing import Any, Dict, List, Optional @@ -20,11 +19,7 @@ @dataclass class DatabricksServiceExceptionWithDetailsProto: - """Serialization format for DatabricksServiceException with error details. This message doesn't - work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. Note the definition - of this message should be in sync with DatabricksServiceExceptionProto defined in - /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details - field defined.""" + """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None """@pbjson-skip""" @@ -174,24 +169,15 @@ class Operation: metadata: Optional[dict] = None """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such - metadata. Any method that returns a long-running operation should document the metadata type, if - any.""" + metadata.""" name: Optional[str] = None """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with - `operations/{unique_id}`. - - Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until - that support is added, `name` must be string without internal `/` separators.""" + `operations/{unique_id}`.""" response: Optional[dict] = None - """The normal, successful response of the operation. If the original method returns no data on - success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is - standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the - response should have the type `XxxResponse`, where `Xxx` is the original method name. 
For - example, if the original method name is `TakeSnapshot()`, the inferred response type is - `TakeSnapshotResponse`.""" + """The normal, successful response of the operation.""" def as_dict(self) -> dict: """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" @@ -380,13 +366,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None) -> TestResource: - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`TestResource` """ @@ -414,7 +400,7 @@ def poll_operation(): return test_resource, None - return poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server @@ -463,13 +449,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None): - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`Any /* MISSING TYPE */` """ @@ -495,7 +481,7 @@ def poll_operation(): return {}, None - poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. 
The server diff --git a/tests/generated/test_json_marshall.py b/tests/generated/test_json_marshall.py index 16fc6fb26..bf5460f2e 100755 --- a/tests/generated/test_json_marshall.py +++ b/tests/generated/test_json_marshall.py @@ -190,7 +190,7 @@ def _fieldmask(d: str) -> FieldMask: required_string="non_default_string", required_struct={}, required_timestamp=_timestamp("2023-12-31T23:59:59Z"), - required_value=json.loads("{}"), + required_value=json.loads('{"key": "value"}'), test_required_enum=TestEnum.TEST_ENUM_TWO, ), """{ @@ -198,6 +198,7 @@ def _fieldmask(d: str) -> FieldMask: "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, + "required_value": {"key": "value"}, "required_message": {}, "test_required_enum": "TEST_ENUM_TWO", "required_duration": "7200s", From 6c7b7634818fe1397353fcb6189d1c3d273d0d62 Mon Sep 17 00:00:00 2001 From: Divyansh Vijayvergia Date: Thu, 20 Nov 2025 17:04:54 +0000 Subject: [PATCH 2/2] test generation --- databricks/sdk/service/billing.py | 16 +- databricks/sdk/service/catalog.py | 9 +- databricks/sdk/service/dashboards.py | 31 +- databricks/sdk/service/database.py | 2 +- databricks/sdk/service/marketplace.py | 5 +- databricks/sdk/service/ml.py | 423 +++++++++++++++++- databricks/sdk/service/postgres.py | 82 ++++ databricks/sdk/service/settingsv2.py | 239 +++++++++- databricks/sdk/service/tags.py | 42 +- docs/account/billing/usage_dashboards.rst | 4 +- docs/account/settingsv2/settings_v2.rst | 53 ++- docs/dbdataclasses/billing.rst | 8 + docs/dbdataclasses/catalog.rst | 2 +- docs/dbdataclasses/ml.rst | 28 ++ docs/dbdataclasses/postgres.rst | 4 + docs/dbdataclasses/settingsv2.rst | 8 + docs/workspace/dashboards/genie.rst | 15 +- docs/workspace/ml/feature_engineering.rst | 57 +++ docs/workspace/ml/feature_store.rst | 10 + docs/workspace/postgres/postgres.rst | 9 + .../settingsv2/workspace_settings_v2.rst | 6 +- docs/workspace/tags/tag_policies.rst | 40 +- 22 files changed, 1057 insertions(+), 36 deletions(-) diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 18704daa1..20455a8f2 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -1472,6 +1472,12 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateBudgetConfigurationResponse: return cls(budget=_from_dict(d, "budget", BudgetConfiguration)) +class UsageDashboardMajorVersion(Enum): + + USAGE_DASHBOARD_MAJOR_VERSION_1 = "USAGE_DASHBOARD_MAJOR_VERSION_1" + USAGE_DASHBOARD_MAJOR_VERSION_2 = "USAGE_DASHBOARD_MAJOR_VERSION_2" + + class UsageDashboardType(Enum): USAGE_DASHBOARD_TYPE_GLOBAL = "USAGE_DASHBOARD_TYPE_GLOBAL" @@ -2129,13 +2135,19 @@ def __init__(self, api_client): self._api = api_client def create( - self, *, dashboard_type: Optional[UsageDashboardType] = None, workspace_id: Optional[int] = None + self, + *, + dashboard_type: Optional[UsageDashboardType] = None, + major_version: Optional[UsageDashboardMajorVersion] = None, + workspace_id: Optional[int] = None, ) -> CreateBillingUsageDashboardResponse: """Create a usage dashboard specified by workspaceId, accountId, and dashboard type. :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. Global level usage dashboard shows usage data for all workspaces in the account. + :param major_version: :class:`UsageDashboardMajorVersion` (optional) + The major version of the usage dashboard template to use. Defaults to VERSION_1. 
:param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. @@ -2145,6 +2157,8 @@ def create( body = {} if dashboard_type is not None: body["dashboard_type"] = dashboard_type.value + if major_version is not None: + body["major_version"] = major_version.value if workspace_id is not None: body["workspace_id"] = workspace_id headers = { diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index fb2f7e0be..7c27279c4 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -30,6 +30,8 @@ class AccessRequestDestinations: """Indicates whether any destinations are hidden from the caller due to a lack of permissions. This value is true if the caller does not have permission to see all destinations.""" + destination_source_securable: Optional[Securable] = None + destinations: Optional[List[NotificationDestination]] = None """The access request destinations for the securable.""" @@ -38,6 +40,8 @@ def as_dict(self) -> dict: body = {} if self.are_any_destinations_hidden is not None: body["are_any_destinations_hidden"] = self.are_any_destinations_hidden + if self.destination_source_securable: + body["destination_source_securable"] = self.destination_source_securable.as_dict() if self.destinations: body["destinations"] = [v.as_dict() for v in self.destinations] if self.securable: @@ -49,6 +53,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.are_any_destinations_hidden is not None: body["are_any_destinations_hidden"] = self.are_any_destinations_hidden + if self.destination_source_securable: + body["destination_source_securable"] = self.destination_source_securable if self.destinations: body["destinations"] = self.destinations if self.securable: @@ -60,6 +66,7 @@ def from_dict(cls, d: Dict[str, Any]) -> AccessRequestDestinations: """Deserializes the AccessRequestDestinations from a dictionary.""" return cls( are_any_destinations_hidden=d.get("are_any_destinations_hidden", None), + destination_source_securable=_from_dict(d, "destination_source_securable", Securable), destinations=_repeated_dict(d, "destinations", NotificationDestination), securable=_from_dict(d, "securable", Securable), ) @@ -8906,7 +8913,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273""" + """Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index b7e68de0e..8c760bde5 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -993,6 +993,12 @@ class GenieSpace: description: Optional[str] = None """Description of the Genie Space""" + serialized_space: Optional[str] = None + """The contents of the Genie Space in serialized string form. This field is excluded in List Genie + spaces responses. Use the [Get Genie Space](:method:genie/getspace) API to retrieve an example + response, which includes the `serialized_space` field. 
This field provides the structure of the + JSON string that represents the space's layout and components.""" + warehouse_id: Optional[str] = None """Warehouse associated with the Genie Space""" @@ -1001,6 +1007,8 @@ def as_dict(self) -> dict: body = {} if self.description is not None: body["description"] = self.description + if self.serialized_space is not None: + body["serialized_space"] = self.serialized_space if self.space_id is not None: body["space_id"] = self.space_id if self.title is not None: @@ -1014,6 +1022,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.description is not None: body["description"] = self.description + if self.serialized_space is not None: + body["serialized_space"] = self.serialized_space if self.space_id is not None: body["space_id"] = self.space_id if self.title is not None: @@ -1027,6 +1037,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieSpace: """Deserializes the GenieSpace from a dictionary.""" return cls( description=d.get("description", None), + serialized_space=d.get("serialized_space", None), space_id=d.get("space_id", None), title=d.get("title", None), warehouse_id=d.get("warehouse_id", None), @@ -2227,7 +2238,10 @@ def create_space( :param warehouse_id: str Warehouse to associate with the new space :param serialized_space: str - Serialized export model for the space contents + The contents of the Genie Space in serialized string form. Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. :param description: str (optional) Optional description :param parent_path: str (optional) @@ -2531,20 +2545,26 @@ def get_message_query_result_by_attachment( ) return GenieGetMessageQueryResultResponse.from_dict(res) - def get_space(self, space_id: str) -> GenieSpace: + def get_space(self, space_id: str, *, include_serialized_space: Optional[bool] = None) -> GenieSpace: """Get details of a Genie Space. :param space_id: str The ID associated with the Genie space + :param include_serialized_space: bool (optional) + Whether to include the serialized space export in the response. Requires at least CAN EDIT + permission on the space. :returns: :class:`GenieSpace` """ + query = {} + if include_serialized_space is not None: + query["include_serialized_space"] = include_serialized_space headers = { "Accept": "application/json", } - res = self._api.do("GET", f"/api/2.0/genie/spaces/{space_id}", headers=headers) + res = self._api.do("GET", f"/api/2.0/genie/spaces/{space_id}", query=query, headers=headers) return GenieSpace.from_dict(res) def list_conversation_messages( @@ -2751,7 +2771,10 @@ def update_space( :param description: str (optional) Optional description :param serialized_space: str (optional) - Serialized export model for the space contents (full replacement) + The contents of the Genie Space in serialized string form (full replacement). Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. 
:param title: str (optional) Optional title override :param warehouse_id: str (optional) diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index cad102d20..72d9e7ec6 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -849,7 +849,7 @@ class NewPipelineSpec: fields of pipeline are still inferred by table def internally""" budget_policy_id: Optional[str] = None - """Budget policy of this pipeline.""" + """Budget policy to set on the newly created pipeline.""" storage_catalog: Optional[str] = None """This field needs to be specified if the destination catalog is a managed postgres catalog. diff --git a/databricks/sdk/service/marketplace.py b/databricks/sdk/service/marketplace.py index 5e5ccc267..84d761c36 100755 --- a/databricks/sdk/service/marketplace.py +++ b/databricks/sdk/service/marketplace.py @@ -3,7 +3,6 @@ from __future__ import annotations import logging -import uuid from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Iterator, List, Optional @@ -2165,6 +2164,8 @@ class PersonalizationRequest: recipient_type: Optional[DeltaSharingRecipientType] = None share: Optional[ShareInfo] = None + """Share information is required for data listings but should be empty/ignored for non-data + listings (MCP and App).""" status: Optional[PersonalizationRequestStatus] = None @@ -4096,8 +4097,6 @@ def update( :returns: :class:`UpdatePersonalizationRequestResponse` """ - if request_id is None or request_id == "": - request_id = str(uuid.uuid4()) body = {} if reason is not None: body["reason"] = reason diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 2fe43aa93..43c68bc37 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -10,6 +10,7 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.common.types.fieldmask import FieldMask from databricks.sdk.service._internal import (Wait, _enum, _from_dict, _repeated_dict, _repeated_enum) @@ -201,6 +202,32 @@ def from_dict(cls, d: Dict[str, Any]) -> ApproveTransitionRequestResponse: return cls(activity=_from_dict(d, "activity", Activity)) +@dataclass +class AuthConfig: + uc_service_credential_name: Optional[str] = None + """Name of the Unity Catalog service credential. 
This value will be set under the option + databricks.serviceCredential""" + + def as_dict(self) -> dict: + """Serializes the AuthConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.uc_service_credential_name is not None: + body["uc_service_credential_name"] = self.uc_service_credential_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AuthConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.uc_service_credential_name is not None: + body["uc_service_credential_name"] = self.uc_service_credential_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AuthConfig: + """Deserializes the AuthConfig from a dictionary.""" + return cls(uc_service_credential_name=d.get("uc_service_credential_name", None)) + + @dataclass class BatchCreateMaterializedFeaturesResponse: materialized_features: Optional[List[MaterializedFeature]] = None @@ -226,6 +253,33 @@ def from_dict(cls, d: Dict[str, Any]) -> BatchCreateMaterializedFeaturesResponse return cls(materialized_features=_repeated_dict(d, "materialized_features", MaterializedFeature)) +@dataclass +class ColumnIdentifier: + variant_expr_path: str + """String representation of the column name or variant expression path. For nested fields, the leaf + value is what will be present in materialized tables and expected to match at query time. For + example, the leaf node of value:trip_details.location_details.pickup_zip is pickup_zip.""" + + def as_dict(self) -> dict: + """Serializes the ColumnIdentifier into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.variant_expr_path is not None: + body["variant_expr_path"] = self.variant_expr_path + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ColumnIdentifier into a shallow dictionary of its immediate attributes.""" + body = {} + if self.variant_expr_path is not None: + body["variant_expr_path"] = self.variant_expr_path + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ColumnIdentifier: + """Deserializes the ColumnIdentifier from a dictionary.""" + return cls(variant_expr_path=d.get("variant_expr_path", None)) + + class CommentActivityAction(Enum): """An action that a user (with sufficient permissions) could take on an activity or comment. 
@@ -600,11 +654,15 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateWebhookResponse: class DataSource: delta_table_source: Optional[DeltaTableSource] = None + kafka_source: Optional[KafkaSource] = None + def as_dict(self) -> dict: """Serializes the DataSource into a dictionary suitable for use as a JSON request body.""" body = {} if self.delta_table_source: body["delta_table_source"] = self.delta_table_source.as_dict() + if self.kafka_source: + body["kafka_source"] = self.kafka_source.as_dict() return body def as_shallow_dict(self) -> dict: @@ -612,12 +670,17 @@ def as_shallow_dict(self) -> dict: body = {} if self.delta_table_source: body["delta_table_source"] = self.delta_table_source + if self.kafka_source: + body["kafka_source"] = self.kafka_source return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> DataSource: """Deserializes the DataSource from a dictionary.""" - return cls(delta_table_source=_from_dict(d, "delta_table_source", DeltaTableSource)) + return cls( + delta_table_source=_from_dict(d, "delta_table_source", DeltaTableSource), + kafka_source=_from_dict(d, "kafka_source", KafkaSource), + ) @dataclass @@ -2388,6 +2451,130 @@ def from_dict(cls, d: Dict[str, Any]) -> JobSpecWithoutSecret: return cls(job_id=d.get("job_id", None), workspace_url=d.get("workspace_url", None)) +@dataclass +class KafkaConfig: + name: str + """Name that uniquely identifies this Kafka config within the metastore. This will be the + identifier used from the Feature object to reference these configs for a feature. Can be + distinct from topic name.""" + + bootstrap_servers: str + """A comma-separated list of host/port pairs pointing to Kafka cluster.""" + + subscription_mode: SubscriptionMode + """Options to configure which Kafka topics to pull data from.""" + + auth_config: AuthConfig + """Authentication configuration for connection to topics.""" + + extra_options: Optional[Dict[str, str]] = None + """Catch-all for miscellaneous options. Keys should be source options or Kafka consumer options + (kafka.*)""" + + key_schema: Optional[SchemaConfig] = None + """Schema configuration for extracting message keys from topics. At least one of key_schema and + value_schema must be provided.""" + + value_schema: Optional[SchemaConfig] = None + """Schema configuration for extracting message values from topics. 
At least one of key_schema and + value_schema must be provided.""" + + def as_dict(self) -> dict: + """Serializes the KafkaConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.auth_config: + body["auth_config"] = self.auth_config.as_dict() + if self.bootstrap_servers is not None: + body["bootstrap_servers"] = self.bootstrap_servers + if self.extra_options: + body["extra_options"] = self.extra_options + if self.key_schema: + body["key_schema"] = self.key_schema.as_dict() + if self.name is not None: + body["name"] = self.name + if self.subscription_mode: + body["subscription_mode"] = self.subscription_mode.as_dict() + if self.value_schema: + body["value_schema"] = self.value_schema.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the KafkaConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.auth_config: + body["auth_config"] = self.auth_config + if self.bootstrap_servers is not None: + body["bootstrap_servers"] = self.bootstrap_servers + if self.extra_options: + body["extra_options"] = self.extra_options + if self.key_schema: + body["key_schema"] = self.key_schema + if self.name is not None: + body["name"] = self.name + if self.subscription_mode: + body["subscription_mode"] = self.subscription_mode + if self.value_schema: + body["value_schema"] = self.value_schema + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> KafkaConfig: + """Deserializes the KafkaConfig from a dictionary.""" + return cls( + auth_config=_from_dict(d, "auth_config", AuthConfig), + bootstrap_servers=d.get("bootstrap_servers", None), + extra_options=d.get("extra_options", None), + key_schema=_from_dict(d, "key_schema", SchemaConfig), + name=d.get("name", None), + subscription_mode=_from_dict(d, "subscription_mode", SubscriptionMode), + value_schema=_from_dict(d, "value_schema", SchemaConfig), + ) + + +@dataclass +class KafkaSource: + name: str + """Name of the Kafka source, used to identify it. This is used to look up the corresponding + KafkaConfig object. 
Can be distinct from topic name.""" + + entity_column_identifiers: List[ColumnIdentifier] + """The entity column identifiers of the Kafka source.""" + + timeseries_column_identifier: ColumnIdentifier + """The timeseries column identifier of the Kafka source.""" + + def as_dict(self) -> dict: + """Serializes the KafkaSource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.entity_column_identifiers: + body["entity_column_identifiers"] = [v.as_dict() for v in self.entity_column_identifiers] + if self.name is not None: + body["name"] = self.name + if self.timeseries_column_identifier: + body["timeseries_column_identifier"] = self.timeseries_column_identifier.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the KafkaSource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.entity_column_identifiers: + body["entity_column_identifiers"] = self.entity_column_identifiers + if self.name is not None: + body["name"] = self.name + if self.timeseries_column_identifier: + body["timeseries_column_identifier"] = self.timeseries_column_identifier + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> KafkaSource: + """Deserializes the KafkaSource from a dictionary.""" + return cls( + entity_column_identifiers=_repeated_dict(d, "entity_column_identifiers", ColumnIdentifier), + name=d.get("name", None), + timeseries_column_identifier=_from_dict(d, "timeseries_column_identifier", ColumnIdentifier), + ) + + @dataclass class LineageContext: """Lineage context information for tracking where an API was invoked. This will allow us to track @@ -2614,6 +2801,41 @@ def from_dict(cls, d: Dict[str, Any]) -> ListFeaturesResponse: return cls(features=_repeated_dict(d, "features", Feature), next_page_token=d.get("next_page_token", None)) +@dataclass +class ListKafkaConfigsResponse: + kafka_configs: List[KafkaConfig] + """List of Kafka configs. 
Schemas are not included in the response.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of results for this query.""" + + def as_dict(self) -> dict: + """Serializes the ListKafkaConfigsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.kafka_configs: + body["kafka_configs"] = [v.as_dict() for v in self.kafka_configs] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListKafkaConfigsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.kafka_configs: + body["kafka_configs"] = self.kafka_configs + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListKafkaConfigsResponse: + """Deserializes the ListKafkaConfigsResponse from a dictionary.""" + return cls( + kafka_configs=_repeated_dict(d, "kafka_configs", KafkaConfig), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ListMaterializedFeaturesResponse: materialized_features: Optional[List[MaterializedFeature]] = None @@ -5009,6 +5231,31 @@ def from_dict(cls, d: Dict[str, Any]) -> RunTag: return cls(key=d.get("key", None), value=d.get("value", None)) +@dataclass +class SchemaConfig: + json_schema: Optional[str] = None + """Schema of the JSON object in standard IETF JSON schema format (https://json-schema.org/)""" + + def as_dict(self) -> dict: + """Serializes the SchemaConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.json_schema is not None: + body["json_schema"] = self.json_schema + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SchemaConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.json_schema is not None: + body["json_schema"] = self.json_schema + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SchemaConfig: + """Deserializes the SchemaConfig from a dictionary.""" + return cls(json_schema=d.get("json_schema", None)) + + @dataclass class SearchExperimentsResponse: experiments: Optional[List[Experiment]] = None @@ -5400,6 +5647,51 @@ class Status(Enum): READY = "READY" +@dataclass +class SubscriptionMode: + assign: Optional[str] = None + """A JSON string that contains the specific topic-partitions to consume from. For example, for + '{"topicA":[0,1],"topicB":[2,4]}', topicA's 0'th and 1st partitions will be consumed from.""" + + subscribe: Optional[str] = None + """A comma-separated list of Kafka topics to read from. For example, 'topicA,topicB,topicC'.""" + + subscribe_pattern: Optional[str] = None + """A regular expression matching topics to subscribe to. 
For example, 'topic.*' will subscribe to + all topics starting with 'topic'.""" + + def as_dict(self) -> dict: + """Serializes the SubscriptionMode into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.assign is not None: + body["assign"] = self.assign + if self.subscribe is not None: + body["subscribe"] = self.subscribe + if self.subscribe_pattern is not None: + body["subscribe_pattern"] = self.subscribe_pattern + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SubscriptionMode into a shallow dictionary of its immediate attributes.""" + body = {} + if self.assign is not None: + body["assign"] = self.assign + if self.subscribe is not None: + body["subscribe"] = self.subscribe + if self.subscribe_pattern is not None: + body["subscribe_pattern"] = self.subscribe_pattern + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SubscriptionMode: + """Deserializes the SubscriptionMode from a dictionary.""" + return cls( + assign=d.get("assign", None), + subscribe=d.get("subscribe", None), + subscribe_pattern=d.get("subscribe_pattern", None), + ) + + @dataclass class TestRegistryWebhookResponse: body: Optional[str] = None @@ -7114,6 +7406,23 @@ def create_feature(self, feature: Feature) -> Feature: res = self._api.do("POST", "/api/2.0/feature-engineering/features", body=body, headers=headers) return Feature.from_dict(res) + def create_kafka_config(self, kafka_config: KafkaConfig) -> KafkaConfig: + """Create a Kafka config. + + :param kafka_config: :class:`KafkaConfig` + + :returns: :class:`KafkaConfig` + """ + + body = kafka_config.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/feature-engineering/features/kafka-configs", body=body, headers=headers) + return KafkaConfig.from_dict(res) + def create_materialized_feature(self, materialized_feature: MaterializedFeature) -> MaterializedFeature: """Create a materialized feature. @@ -7147,6 +7456,21 @@ def delete_feature(self, full_name: str): self._api.do("DELETE", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) + def delete_kafka_config(self, name: str): + """Delete a Kafka config. + + :param name: str + Name of the Kafka config to delete. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/feature-engineering/features/kafka-configs/kafka/{name}", headers=headers) + def delete_materialized_feature(self, materialized_feature_id: str): """Delete a materialized feature. @@ -7180,6 +7504,22 @@ def get_feature(self, full_name: str) -> Feature: res = self._api.do("GET", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) return Feature.from_dict(res) + def get_kafka_config(self, name: str) -> KafkaConfig: + """Get a Kafka config. + + :param name: str + Name of the Kafka config to get. + + :returns: :class:`KafkaConfig` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/feature-engineering/features/kafka-configs/{name}", headers=headers) + return KafkaConfig.from_dict(res) + def get_materialized_feature(self, materialized_feature_id: str) -> MaterializedFeature: """Get a materialized feature. 
@@ -7227,6 +7567,39 @@ def list_features(self, *, page_size: Optional[int] = None, page_token: Optional return query["page_token"] = json["next_page_token"] + def list_kafka_configs( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[KafkaConfig]: + """List Kafka configs. + + :param page_size: int (optional) + The maximum number of results to return. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`KafkaConfig` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", "/api/2.0/feature-engineering/features/kafka-configs", query=query, headers=headers + ) + if "kafka_configs" in json: + for v in json["kafka_configs"]: + yield KafkaConfig.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + def list_materialized_features( self, *, feature_name: Optional[str] = None, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[MaterializedFeature]: @@ -7293,6 +7666,39 @@ def update_feature(self, full_name: str, feature: Feature, update_mask: str) -> ) return Feature.from_dict(res) + def update_kafka_config(self, name: str, kafka_config: KafkaConfig, update_mask: FieldMask) -> KafkaConfig: + """Update a Kafka config. + + :param name: str + Name that uniquely identifies this Kafka config within the metastore. This will be the identifier + used from the Feature object to reference these configs for a feature. Can be distinct from topic + name. + :param kafka_config: :class:`KafkaConfig` + The Kafka config to update. + :param update_mask: FieldMask + The list of fields to update. + + :returns: :class:`KafkaConfig` + """ + + body = kafka_config.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask.ToJsonString() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/feature-engineering/features/kafka-configs/{name}", + query=query, + body=body, + headers=headers, + ) + return KafkaConfig.from_dict(res) + def update_materialized_feature( self, materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str ) -> MaterializedFeature: @@ -7372,6 +7778,21 @@ def delete_online_store(self, name: str): self._api.do("DELETE", f"/api/2.0/feature-store/online-stores/{name}", headers=headers) + def delete_online_table(self, online_table_name: str): + """Delete online table. + + :param online_table_name: str + The full three-part (catalog, schema, table) name of the online table. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/feature-store/online-tables/{online_table_name}", headers=headers) + def get_online_store(self, name: str) -> OnlineStore: """Get an Online Feature Store. 
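A minimal usage sketch for the new Kafka config endpoints added to `FeatureEngineeringAPI` in the ml.py hunks above; it is not part of the generated patch. The `w.feature_engineering` accessor name, the topic, broker addresses, credential name, and JSON schema below are placeholder assumptions, while the dataclasses are the ones this patch introduces in `databricks/sdk/service/ml.py`.

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.ml import (AuthConfig, KafkaConfig,
                                           SchemaConfig, SubscriptionMode)

    w = WorkspaceClient()

    # Register a Kafka source that features can later reference by name.
    created = w.feature_engineering.create_kafka_config(
        KafkaConfig(
            name="orders-stream",  # placeholder config name
            bootstrap_servers="broker-1.example.com:9092,broker-2.example.com:9092",
            subscription_mode=SubscriptionMode(subscribe="orders"),
            auth_config=AuthConfig(uc_service_credential_name="kafka-credential"),
            # At least one of key_schema / value_schema must be provided.
            value_schema=SchemaConfig(json_schema='{"type": "object"}'),
        )
    )

    # Configs can be listed (schemas are omitted from list responses) or fetched by name.
    for cfg in w.feature_engineering.list_kafka_configs(page_size=50):
        print(cfg.name, cfg.bootstrap_servers)
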
diff --git a/databricks/sdk/service/postgres.py b/databricks/sdk/service/postgres.py index 120f01005..04034efa3 100755 --- a/databricks/sdk/service/postgres.py +++ b/databricks/sdk/service/postgres.py @@ -168,6 +168,71 @@ def from_dict(cls, d: Dict[str, Any]) -> DatabaseBranchOperationMetadata: return cls() +@dataclass +class DatabaseCatalog: + name: str + """The name of the catalog in UC.""" + + database_name: str + """The name of the database (in a instance) associated with the catalog.""" + + create_database_if_not_exists: Optional[bool] = None + + database_branch_id: Optional[str] = None + """The branch_id of the database branch associated with the catalog.""" + + database_project_id: Optional[str] = None + """The project_id of the database project associated with the catalog.""" + + uid: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabaseCatalog into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_database_if_not_exists is not None: + body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id + if self.database_name is not None: + body["database_name"] = self.database_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id + if self.name is not None: + body["name"] = self.name + if self.uid is not None: + body["uid"] = self.uid + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseCatalog into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_database_if_not_exists is not None: + body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_branch_id is not None: + body["database_branch_id"] = self.database_branch_id + if self.database_name is not None: + body["database_name"] = self.database_name + if self.database_project_id is not None: + body["database_project_id"] = self.database_project_id + if self.name is not None: + body["name"] = self.name + if self.uid is not None: + body["uid"] = self.uid + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseCatalog: + """Deserializes the DatabaseCatalog from a dictionary.""" + return cls( + create_database_if_not_exists=d.get("create_database_if_not_exists", None), + database_branch_id=d.get("database_branch_id", None), + database_name=d.get("database_name", None), + database_project_id=d.get("database_project_id", None), + name=d.get("name", None), + uid=d.get("uid", None), + ) + + @dataclass class DatabaseEndpoint: autoscaling_limit_max_cu: Optional[float] = None @@ -1052,6 +1117,23 @@ def create_database_branch( operation = Operation.from_dict(res) return CreateDatabaseBranchOperation(self, operation) + def create_database_catalog(self, catalog: DatabaseCatalog) -> DatabaseCatalog: + """Create a Database Catalog. 
+ + :param catalog: :class:`DatabaseCatalog` + + :returns: :class:`DatabaseCatalog` + """ + + body = catalog.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/postgres/catalogs", body=body, headers=headers) + return DatabaseCatalog.from_dict(res) + def create_database_endpoint( self, parent: str, database_endpoint: DatabaseEndpoint, *, database_endpoint_id: Optional[str] = None ) -> CreateDatabaseEndpointOperation: diff --git a/databricks/sdk/service/settingsv2.py b/databricks/sdk/service/settingsv2.py index a529d7a5a..50287b14a 100755 --- a/databricks/sdk/service/settingsv2.py +++ b/databricks/sdk/service/settingsv2.py @@ -382,6 +382,42 @@ def from_dict(cls, d: Dict[str, Any]) -> ListAccountSettingsMetadataResponse: ) +@dataclass +class ListAccountUserPreferencesMetadataResponse: + next_page_token: Optional[str] = None + """A token that can be sent as `page_token` to retrieve the next page. If this field is omitted, + there are no subsequent pages.""" + + settings_metadata: Optional[List[SettingsMetadata]] = None + """List of all settings available via public APIs and their metadata""" + + def as_dict(self) -> dict: + """Serializes the ListAccountUserPreferencesMetadataResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.settings_metadata: + body["settings_metadata"] = [v.as_dict() for v in self.settings_metadata] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListAccountUserPreferencesMetadataResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.settings_metadata: + body["settings_metadata"] = self.settings_metadata + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListAccountUserPreferencesMetadataResponse: + """Deserializes the ListAccountUserPreferencesMetadataResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + settings_metadata=_repeated_dict(d, "settings_metadata", SettingsMetadata), + ) + + @dataclass class ListWorkspaceSettingsMetadataResponse: next_page_token: Optional[str] = None @@ -486,39 +522,72 @@ class RestrictWorkspaceAdminsMessageStatus(Enum): @dataclass class Setting: aibi_dashboard_embedding_access_policy: Optional[AibiDashboardEmbeddingAccessPolicy] = None + """Setting value for aibi_dashboard_embedding_access_policy setting. This is the setting value set + by consumers, check effective_aibi_dashboard_embedding_access_policy for final setting value.""" aibi_dashboard_embedding_approved_domains: Optional[AibiDashboardEmbeddingApprovedDomains] = None + """Setting value for aibi_dashboard_embedding_approved_domains setting. This is the setting value + set by consumers, check effective_aibi_dashboard_embedding_approved_domains for final setting + value.""" automatic_cluster_update_workspace: Optional[ClusterAutoRestartMessage] = None + """Setting value for automatic_cluster_update_workspace setting. This is the setting value set by + consumers, check effective_automatic_cluster_update_workspace for final setting value.""" boolean_val: Optional[BooleanMessage] = None + """Setting value for boolean type setting. 
This is the setting value set by consumers, check + effective_boolean_val for final setting value.""" effective_aibi_dashboard_embedding_access_policy: Optional[AibiDashboardEmbeddingAccessPolicy] = None + """Effective setting value for aibi_dashboard_embedding_access_policy setting. This is the final + effective value of setting. To set a value use aibi_dashboard_embedding_access_policy.""" effective_aibi_dashboard_embedding_approved_domains: Optional[AibiDashboardEmbeddingApprovedDomains] = None + """Effective setting value for aibi_dashboard_embedding_approved_domains setting. This is the final + effective value of setting. To set a value use aibi_dashboard_embedding_approved_domains.""" effective_automatic_cluster_update_workspace: Optional[ClusterAutoRestartMessage] = None + """Effective setting value for automatic_cluster_update_workspace setting. This is the final + effective value of setting. To set a value use automatic_cluster_update_workspace.""" effective_boolean_val: Optional[BooleanMessage] = None + """Effective setting value for boolean type setting. This is the final effective value of setting. + To set a value use boolean_val.""" effective_integer_val: Optional[IntegerMessage] = None + """Effective setting value for integer type setting. This is the final effective value of setting. + To set a value use integer_val.""" effective_personal_compute: Optional[PersonalComputeMessage] = None + """Effective setting value for personal_compute setting. This is the final effective value of + setting. To set a value use personal_compute.""" effective_restrict_workspace_admins: Optional[RestrictWorkspaceAdminsMessage] = None + """Effective setting value for restrict_workspace_admins setting. This is the final effective value + of setting. To set a value use restrict_workspace_admins.""" effective_string_val: Optional[StringMessage] = None + """Effective setting value for string type setting. This is the final effective value of setting. + To set a value use string_val.""" integer_val: Optional[IntegerMessage] = None + """Setting value for integer type setting. This is the setting value set by consumers, check + effective_integer_val for final setting value.""" name: Optional[str] = None """Name of the setting.""" personal_compute: Optional[PersonalComputeMessage] = None + """Setting value for personal_compute setting. This is the setting value set by consumers, check + effective_personal_compute for final setting value.""" restrict_workspace_admins: Optional[RestrictWorkspaceAdminsMessage] = None + """Setting value for restrict_workspace_admins setting. This is the setting value set by consumers, + check effective_restrict_workspace_admins for final setting value.""" string_val: Optional[StringMessage] = None + """Setting value for string type setting. This is the setting value set by consumers, check + effective_string_val for final setting value.""" def as_dict(self) -> dict: """Serializes the Setting into a dictionary suitable for use as a JSON request body.""" @@ -658,7 +727,8 @@ class SettingsMetadata: """Name of the setting.""" type: Optional[str] = None - """Type of the setting. To set this setting, the value sent must match this type.""" + """Sample message depicting the type of the setting. 
To set this setting, the value sent must match + this type.""" def as_dict(self) -> dict: """Serializes the SettingsMetadata into a dictionary suitable for use as a JSON request body.""" @@ -722,6 +792,69 @@ def from_dict(cls, d: Dict[str, Any]) -> StringMessage: return cls(value=d.get("value", None)) +@dataclass +class UserPreference: + boolean_val: Optional[BooleanMessage] = None + + effective_boolean_val: Optional[BooleanMessage] = None + + effective_string_val: Optional[StringMessage] = None + + name: Optional[str] = None + """Name of the setting.""" + + string_val: Optional[StringMessage] = None + + user_id: Optional[str] = None + """User ID of the user.""" + + def as_dict(self) -> dict: + """Serializes the UserPreference into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val.as_dict() + if self.effective_boolean_val: + body["effective_boolean_val"] = self.effective_boolean_val.as_dict() + if self.effective_string_val: + body["effective_string_val"] = self.effective_string_val.as_dict() + if self.name is not None: + body["name"] = self.name + if self.string_val: + body["string_val"] = self.string_val.as_dict() + if self.user_id is not None: + body["user_id"] = self.user_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UserPreference into a shallow dictionary of its immediate attributes.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val + if self.effective_boolean_val: + body["effective_boolean_val"] = self.effective_boolean_val + if self.effective_string_val: + body["effective_string_val"] = self.effective_string_val + if self.name is not None: + body["name"] = self.name + if self.string_val: + body["string_val"] = self.string_val + if self.user_id is not None: + body["user_id"] = self.user_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UserPreference: + """Deserializes the UserPreference from a dictionary.""" + return cls( + boolean_val=_from_dict(d, "boolean_val", BooleanMessage), + effective_boolean_val=_from_dict(d, "effective_boolean_val", BooleanMessage), + effective_string_val=_from_dict(d, "effective_string_val", StringMessage), + name=d.get("name", None), + string_val=_from_dict(d, "string_val", StringMessage), + user_id=d.get("user_id", None), + ) + + class AccountSettingsV2API: """APIs to manage account level settings""" @@ -744,6 +877,27 @@ def get_public_account_setting(self, name: str) -> Setting: res = self._api.do("GET", f"/api/2.1/accounts/{self._api.account_id}/settings/{name}", headers=headers) return Setting.from_dict(res) + def get_public_account_user_preference(self, user_id: str, name: str) -> UserPreference: + """Get a setting value for a specific user at account level. See + :method:settingsv2/listaccountuserpreferencesmetadata for list of setting available via public APIs. + + :param user_id: str + User ID of the user whose setting is being retrieved. + :param name: str + User Setting name. 
+ + :returns: :class:`UserPreference` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.1/accounts/{self._api.account_id}/users/{user_id}/settings/{name}", headers=headers + ) + return UserPreference.from_dict(res) + def list_account_settings_metadata( self, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[SettingsMetadata]: @@ -784,9 +938,57 @@ def list_account_settings_metadata( return query["page_token"] = json["next_page_token"] + def list_account_user_preferences_metadata( + self, user_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[SettingsMetadata]: + """List valid setting keys and metadata for a specific user. These settings are available to be + referenced via GET :method:settingsv2/getpublicaccountuserpreference and PATCH + :method:settingsv2/patchpublicaccountuserpreference APIs + + :param user_id: str + User ID of the user whose settings metadata is being retrieved. + :param page_size: int (optional) + The maximum number of settings to return. The service may return fewer than this value. If + unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 + will be coerced to 1000. + :param page_token: str (optional) + A page token, received from a previous `ListAccountUserPreferencesMetadataRequest` call. Provide + this to retrieve the subsequent page. + + When paginating, all other parameters provided to `ListAccountUserPreferencesMetadataRequest` must + match the call that provided the page token. + + :returns: Iterator over :class:`SettingsMetadata` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", + f"/api/2.1/accounts/{self._api.account_id}/users/{user_id}/settings-metadata", + query=query, + headers=headers, + ) + if "settings_metadata" in json: + for v in json["settings_metadata"]: + yield SettingsMetadata.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + def patch_public_account_setting(self, name: str, setting: Setting) -> Setting: """Patch a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of - setting available via public APIs at account level. + setting available via public APIs at account level. To determine the correct field to include in a + patch request, refer to the type field of the setting returned in the + :method:settingsv2/listaccountsettingsmetadata response. :param name: str :param setting: :class:`Setting` @@ -805,6 +1007,33 @@ def patch_public_account_setting(self, name: str, setting: Setting) -> Setting: ) return Setting.from_dict(res) + def patch_public_account_user_preference(self, user_id: str, name: str, setting: UserPreference) -> UserPreference: + """Patch a setting value for a specific user at account level. See + :method:settingsv2/listaccountuserpreferencesmetadata for list of setting available via public APIs at + account-user level. + + :param user_id: str + User ID of the user whose setting is being updated. 
+ :param name: str + :param setting: :class:`UserPreference` + + :returns: :class:`UserPreference` + """ + + body = setting.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.1/accounts/{self._api.account_id}/users/{user_id}/settings/{name}", + body=body, + headers=headers, + ) + return UserPreference.from_dict(res) + class WorkspaceSettingsV2API: """APIs to manage workspace level settings""" @@ -817,6 +1046,7 @@ def get_public_workspace_setting(self, name: str) -> Setting: of setting available via public APIs. :param name: str + Name of the setting :returns: :class:`Setting` """ @@ -869,9 +1099,12 @@ def list_workspace_settings_metadata( def patch_public_workspace_setting(self, name: str, setting: Setting) -> Setting: """Patch a setting value at workspace level. See :method:settingsv2/listworkspacesettingsmetadata for - list of setting available via public APIs at workspace level. + list of setting available via public APIs at workspace level. To determine the correct field to + include in a patch request, refer to the type field of the setting returned in the + :method:settingsv2/listworkspacesettingsmetadata response. :param name: str + Name of the setting :param setting: :class:`Setting` :returns: :class:`Setting` diff --git a/databricks/sdk/service/tags.py b/databricks/sdk/service/tags.py index a6711ab41..c406723af 100755 --- a/databricks/sdk/service/tags.py +++ b/databricks/sdk/service/tags.py @@ -367,16 +367,24 @@ def update_tag_assignment( class TagPoliciesAPI: - """The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag - policies can be managed using the [Account Access Control Proxy API]. + """The Tag Policy API allows you to manage policies for governed tags in Databricks. For Terraform usage, see + the [Tag Policy Terraform documentation]. Permissions for tag policies can be managed using the [Account + Access Control Proxy API]. - [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy""" + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy + """ def __init__(self, api_client): self._api = api_client def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: - """Creates a new tag policy, making the associated tag key governed. + """Creates a new tag policy, making the associated tag key governed. For Terraform usage, see the [Tag + Policy Terraform documentation]. To manage permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_policy: :class:`TagPolicy` @@ -393,7 +401,10 @@ def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: return TagPolicy.from_dict(res) def delete_tag_policy(self, tag_key: str): - """Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. + """Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. For + Terraform usage, see the [Tag Policy Terraform documentation]. 
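        Example -- a minimal sketch, assuming the workspace client exposes this service as
        ``tag_policies`` and using a hypothetical governed tag key::

            from databricks.sdk import WorkspaceClient

            w = WorkspaceClient()
            w.tag_policies.delete_tag_policy(tag_key="cost-center")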
+ + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str @@ -407,7 +418,12 @@ def delete_tag_policy(self, tag_key: str): self._api.do("DELETE", f"/api/2.1/tag-policies/{tag_key}", headers=headers) def get_tag_policy(self, tag_key: str) -> TagPolicy: - """Gets a single tag policy by its associated governed tag's key. + """Gets a single tag policy by its associated governed tag's key. For Terraform usage, see the [Tag + Policy Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policy :param tag_key: str @@ -424,7 +440,12 @@ def get_tag_policy(self, tag_key: str) -> TagPolicy: def list_tag_policies( self, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[TagPolicy]: - """Lists the tag policies for all governed tags in the account. + """Lists the tag policies for all governed tags in the account. For Terraform usage, see the [Tag Policy + Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policies :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than @@ -455,7 +476,12 @@ def list_tag_policies( query["page_token"] = json["next_page_token"] def update_tag_policy(self, tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy: - """Updates an existing tag policy for a single governed tag. + """Updates an existing tag policy for a single governed tag. For Terraform usage, see the [Tag Policy + Terraform documentation]. To manage permissions for tag policies, use the [Account Access Control + Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str :param tag_policy: :class:`TagPolicy` diff --git a/docs/account/billing/usage_dashboards.rst b/docs/account/billing/usage_dashboards.rst index 4eef82411..5c325ba2e 100644 --- a/docs/account/billing/usage_dashboards.rst +++ b/docs/account/billing/usage_dashboards.rst @@ -8,13 +8,15 @@ your usage with pre-built dashboards: visualize breakdowns, analyze tag attributions, and identify cost drivers. - .. py:method:: create( [, dashboard_type: Optional[UsageDashboardType], workspace_id: Optional[int]]) -> CreateBillingUsageDashboardResponse + .. py:method:: create( [, dashboard_type: Optional[UsageDashboardType], major_version: Optional[UsageDashboardMajorVersion], workspace_id: Optional[int]]) -> CreateBillingUsageDashboardResponse Create a usage dashboard specified by workspaceId, accountId, and dashboard type. :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. Global level usage dashboard shows usage data for all workspaces in the account. 
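    A minimal sketch of creating a global usage dashboard on the version-2 template, assuming the account client exposes this service as ``usage_dashboards``:

    .. code-block:: python

        from databricks.sdk import AccountClient
        from databricks.sdk.service.billing import (UsageDashboardMajorVersion,
                                                     UsageDashboardType)

        a = AccountClient()
        resp = a.usage_dashboards.create(
            dashboard_type=UsageDashboardType.USAGE_DASHBOARD_TYPE_GLOBAL,
            major_version=UsageDashboardMajorVersion.USAGE_DASHBOARD_MAJOR_VERSION_2,
        )
        print(resp)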
+ :param major_version: :class:`UsageDashboardMajorVersion` (optional) + The major version of the usage dashboard template to use. Defaults to VERSION_1. :param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. diff --git a/docs/account/settingsv2/settings_v2.rst b/docs/account/settingsv2/settings_v2.rst index 03224db02..fe539b50e 100644 --- a/docs/account/settingsv2/settings_v2.rst +++ b/docs/account/settingsv2/settings_v2.rst @@ -16,6 +16,19 @@ :returns: :class:`Setting` + .. py:method:: get_public_account_user_preference(user_id: str, name: str) -> UserPreference + + Get a setting value for a specific user at account level. See + :method:settingsv2/listaccountuserpreferencesmetadata for list of setting available via public APIs. + + :param user_id: str + User ID of the user whose setting is being retrieved. + :param name: str + User Setting name. + + :returns: :class:`UserPreference` + + .. py:method:: list_account_settings_metadata( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] List valid setting keys and metadata. These settings are available to be referenced via GET @@ -35,13 +48,51 @@ :returns: Iterator over :class:`SettingsMetadata` + .. py:method:: list_account_user_preferences_metadata(user_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] + + List valid setting keys and metadata for a specific user. These settings are available to be + referenced via GET :method:settingsv2/getpublicaccountuserpreference and PATCH + :method:settingsv2/patchpublicaccountuserpreference APIs + + :param user_id: str + User ID of the user whose settings metadata is being retrieved. + :param page_size: int (optional) + The maximum number of settings to return. The service may return fewer than this value. If + unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 + will be coerced to 1000. + :param page_token: str (optional) + A page token, received from a previous `ListAccountUserPreferencesMetadataRequest` call. Provide + this to retrieve the subsequent page. + + When paginating, all other parameters provided to `ListAccountUserPreferencesMetadataRequest` must + match the call that provided the page token. + + :returns: Iterator over :class:`SettingsMetadata` + + .. py:method:: patch_public_account_setting(name: str, setting: Setting) -> Setting Patch a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of - setting available via public APIs at account level. + setting available via public APIs at account level. To determine the correct field to include in a + patch request, refer to the type field of the setting returned in the + :method:settingsv2/listaccountsettingsmetadata response. :param name: str :param setting: :class:`Setting` :returns: :class:`Setting` + + + .. py:method:: patch_public_account_user_preference(user_id: str, name: str, setting: UserPreference) -> UserPreference + + Patch a setting value for a specific user at account level. See + :method:settingsv2/listaccountuserpreferencesmetadata for list of setting available via public APIs at + account-user level. + + :param user_id: str + User ID of the user whose setting is being updated. 
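    A minimal sketch of discovering which user preferences exist before patching, assuming the account client exposes this service as ``settings_v2`` and using a hypothetical user ID:

    .. code-block:: python

        from databricks.sdk import AccountClient

        a = AccountClient()
        for meta in a.settings_v2.list_account_user_preferences_metadata(user_id="1234567890"):
            # The type field indicates whether string_val or boolean_val should be
            # populated in a subsequent patch request; field names follow the
            # SettingsMetadata dataclass.
            print(meta.name, meta.type)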
+ :param name: str + :param setting: :class:`UserPreference` + + :returns: :class:`UserPreference` \ No newline at end of file diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index e9a753418..ca3cee231 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -204,6 +204,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: UsageDashboardMajorVersion + + .. py:attribute:: USAGE_DASHBOARD_MAJOR_VERSION_1 + :value: "USAGE_DASHBOARD_MAJOR_VERSION_1" + + .. py:attribute:: USAGE_DASHBOARD_MAJOR_VERSION_2 + :value: "USAGE_DASHBOARD_MAJOR_VERSION_2" + .. py:class:: UsageDashboardType .. py:attribute:: USAGE_DASHBOARD_TYPE_GLOBAL diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index ff2b4eec4..02f7be550 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -1528,7 +1528,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273 + Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index f7a3373ff..160587741 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -66,10 +66,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AuthConfig + :members: + :undoc-members: + .. autoclass:: BatchCreateMaterializedFeaturesResponse :members: :undoc-members: +.. autoclass:: ColumnIdentifier + :members: + :undoc-members: + .. py:class:: CommentActivityAction An action that a user (with sufficient permissions) could take on an activity or comment. @@ -429,6 +437,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: KafkaConfig + :members: + :undoc-members: + +.. autoclass:: KafkaSource + :members: + :undoc-members: + .. autoclass:: LineageContext :members: :undoc-members: @@ -453,6 +469,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListKafkaConfigsResponse + :members: + :undoc-members: + .. autoclass:: ListMaterializedFeaturesResponse :members: :undoc-members: @@ -848,6 +868,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SchemaConfig + :members: + :undoc-members: + .. autoclass:: SearchExperimentsResponse :members: :undoc-members: @@ -915,6 +939,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: READY :value: "READY" +.. autoclass:: SubscriptionMode + :members: + :undoc-members: + .. autoclass:: TestRegistryWebhookResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/postgres.rst b/docs/dbdataclasses/postgres.rst index 501011b72..9c0ec81b4 100644 --- a/docs/dbdataclasses/postgres.rst +++ b/docs/dbdataclasses/postgres.rst @@ -12,6 +12,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DatabaseCatalog + :members: + :undoc-members: + .. 
autoclass:: DatabaseEndpoint :members: :undoc-members: diff --git a/docs/dbdataclasses/settingsv2.rst b/docs/dbdataclasses/settingsv2.rst index d15eb734f..55efcbd8c 100644 --- a/docs/dbdataclasses/settingsv2.rst +++ b/docs/dbdataclasses/settingsv2.rst @@ -101,6 +101,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListAccountUserPreferencesMetadataResponse + :members: + :undoc-members: + .. autoclass:: ListWorkspaceSettingsMetadataResponse :members: :undoc-members: @@ -142,3 +146,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: StringMessage :members: :undoc-members: + +.. autoclass:: UserPreference + :members: + :undoc-members: diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index 3c018651c..22ddf10ad 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -36,7 +36,10 @@ :param warehouse_id: str Warehouse to associate with the new space :param serialized_space: str - Serialized export model for the space contents + The contents of the Genie Space in serialized string form. Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. :param description: str (optional) Optional description :param parent_path: str (optional) @@ -213,12 +216,15 @@ :returns: :class:`GenieGetMessageQueryResultResponse` - .. py:method:: get_space(space_id: str) -> GenieSpace + .. py:method:: get_space(space_id: str [, include_serialized_space: Optional[bool]]) -> GenieSpace Get details of a Genie Space. :param space_id: str The ID associated with the Genie space + :param include_serialized_space: bool (optional) + Whether to include the serialized space export in the response. Requires at least CAN EDIT + permission on the space. :returns: :class:`GenieSpace` @@ -322,7 +328,10 @@ :param description: str (optional) Optional description :param serialized_space: str (optional) - Serialized export model for the space contents (full replacement) + The contents of the Genie Space in serialized string form (full replacement). Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. :param title: str (optional) Optional title override :param warehouse_id: str (optional) diff --git a/docs/workspace/ml/feature_engineering.rst b/docs/workspace/ml/feature_engineering.rst index 57c99a11c..b017ec828 100644 --- a/docs/workspace/ml/feature_engineering.rst +++ b/docs/workspace/ml/feature_engineering.rst @@ -26,6 +26,15 @@ :returns: :class:`Feature` + .. py:method:: create_kafka_config(kafka_config: KafkaConfig) -> KafkaConfig + + Create a Kafka config. + + :param kafka_config: :class:`KafkaConfig` + + :returns: :class:`KafkaConfig` + + .. py:method:: create_materialized_feature(materialized_feature: MaterializedFeature) -> MaterializedFeature Create a materialized feature. @@ -46,6 +55,16 @@ + .. py:method:: delete_kafka_config(name: str) + + Delete a Kafka config. + + :param name: str + Name of the Kafka config to delete. + + + + .. py:method:: delete_materialized_feature(materialized_feature_id: str) Delete a materialized feature. 
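    A minimal sketch of listing Kafka configs and fetching one by name, assuming the workspace client exposes this service as ``feature_engineering``:

    .. code-block:: python

        from databricks.sdk import WorkspaceClient

        w = WorkspaceClient()
        for cfg in w.feature_engineering.list_kafka_configs(page_size=10):
            # name uniquely identifies the config within the metastore (see
            # update_kafka_config below); fetch the full config by that name.
            detail = w.feature_engineering.get_kafka_config(name=cfg.name)
            print(detail)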
@@ -66,6 +85,16 @@ :returns: :class:`Feature` + .. py:method:: get_kafka_config(name: str) -> KafkaConfig + + Get a Kafka config. + + :param name: str + Name of the Kafka config to get. + + :returns: :class:`KafkaConfig` + + .. py:method:: get_materialized_feature(materialized_feature_id: str) -> MaterializedFeature Get a materialized feature. @@ -88,6 +117,18 @@ :returns: Iterator over :class:`Feature` + .. py:method:: list_kafka_configs( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[KafkaConfig] + + List Kafka configs. + + :param page_size: int (optional) + The maximum number of results to return. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`KafkaConfig` + + .. py:method:: list_materialized_features( [, feature_name: Optional[str], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[MaterializedFeature] List materialized features. @@ -118,6 +159,22 @@ :returns: :class:`Feature` + .. py:method:: update_kafka_config(name: str, kafka_config: KafkaConfig, update_mask: FieldMask) -> KafkaConfig + + Update a Kafka config. + + :param name: str + Name that uniquely identifies this Kafka config within the metastore. This will be the identifier + used from the Feature object to reference these configs for a feature. Can be distinct from topic + name. + :param kafka_config: :class:`KafkaConfig` + The Kafka config to update. + :param update_mask: FieldMask + The list of fields to update. + + :returns: :class:`KafkaConfig` + + .. py:method:: update_materialized_feature(materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str) -> MaterializedFeature Update a materialized feature (pause/resume). diff --git a/docs/workspace/ml/feature_store.rst b/docs/workspace/ml/feature_store.rst index c85fd5e59..40ba8dc3b 100644 --- a/docs/workspace/ml/feature_store.rst +++ b/docs/workspace/ml/feature_store.rst @@ -31,6 +31,16 @@ + .. py:method:: delete_online_table(online_table_name: str) + + Delete online table. + + :param online_table_name: str + The full three-part (catalog, schema, table) name of the online table. + + + + .. py:method:: get_online_store(name: str) -> OnlineStore Get an Online Feature Store. diff --git a/docs/workspace/postgres/postgres.rst b/docs/workspace/postgres/postgres.rst index 3951227e4..b4a4f3331 100644 --- a/docs/workspace/postgres/postgres.rst +++ b/docs/workspace/postgres/postgres.rst @@ -23,6 +23,15 @@ :returns: :class:`Operation` + .. py:method:: create_database_catalog(catalog: DatabaseCatalog) -> DatabaseCatalog + + Create a Database Catalog. + + :param catalog: :class:`DatabaseCatalog` + + :returns: :class:`DatabaseCatalog` + + .. py:method:: create_database_endpoint(parent: str, database_endpoint: DatabaseEndpoint [, database_endpoint_id: Optional[str]]) -> CreateDatabaseEndpointOperation Create a Database Endpoint. diff --git a/docs/workspace/settingsv2/workspace_settings_v2.rst b/docs/workspace/settingsv2/workspace_settings_v2.rst index 2d6d379df..10534c2fa 100644 --- a/docs/workspace/settingsv2/workspace_settings_v2.rst +++ b/docs/workspace/settingsv2/workspace_settings_v2.rst @@ -12,6 +12,7 @@ of setting available via public APIs. :param name: str + Name of the setting :returns: :class:`Setting` @@ -39,9 +40,12 @@ .. py:method:: patch_public_workspace_setting(name: str, setting: Setting) -> Setting Patch a setting value at workspace level. 
See :method:settingsv2/listworkspacesettingsmetadata for - list of setting available via public APIs at workspace level. + list of setting available via public APIs at workspace level. To determine the correct field to + include in a patch request, refer to the type field of the setting returned in the + :method:settingsv2/listworkspacesettingsmetadata response. :param name: str + Name of the setting :param setting: :class:`Setting` :returns: :class:`Setting` diff --git a/docs/workspace/tags/tag_policies.rst b/docs/workspace/tags/tag_policies.rst index 35f893b2e..0c335d8ac 100644 --- a/docs/workspace/tags/tag_policies.rst +++ b/docs/workspace/tags/tag_policies.rst @@ -4,14 +4,22 @@ .. py:class:: TagPoliciesAPI - The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag - policies can be managed using the [Account Access Control Proxy API]. + The Tag Policy API allows you to manage policies for governed tags in Databricks. For Terraform usage, see + the [Tag Policy Terraform documentation]. Permissions for tag policies can be managed using the [Account + Access Control Proxy API]. [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy + .. py:method:: create_tag_policy(tag_policy: TagPolicy) -> TagPolicy - Creates a new tag policy, making the associated tag key governed. + Creates a new tag policy, making the associated tag key governed. For Terraform usage, see the [Tag + Policy Terraform documentation]. To manage permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_policy: :class:`TagPolicy` @@ -20,7 +28,10 @@ .. py:method:: delete_tag_policy(tag_key: str) - Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. + Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. For + Terraform usage, see the [Tag Policy Terraform documentation]. + + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str @@ -29,7 +40,12 @@ .. py:method:: get_tag_policy(tag_key: str) -> TagPolicy - Gets a single tag policy by its associated governed tag's key. + Gets a single tag policy by its associated governed tag's key. For Terraform usage, see the [Tag + Policy Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policy :param tag_key: str @@ -38,7 +54,12 @@ .. py:method:: list_tag_policies( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagPolicy] - Lists the tag policies for all governed tags in the account. + Lists the tag policies for all governed tags in the account. For Terraform usage, see the [Tag Policy + Terraform documentation]. 
To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policies :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than @@ -52,7 +73,12 @@ .. py:method:: update_tag_policy(tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy - Updates an existing tag policy for a single governed tag. + Updates an existing tag policy for a single governed tag. For Terraform usage, see the [Tag Policy + Terraform documentation]. To manage permissions for tag policies, use the [Account Access Control + Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str :param tag_policy: :class:`TagPolicy`