From 0b9a0b4092b3c8b91631017e89df5d0f2d8db630 Mon Sep 17 00:00:00 2001 From: phoenix Date: Tue, 25 Nov 2025 19:01:58 +0000 Subject: [PATCH] Update SDK to 83ba5645aa5cf53f39008962d6ff4e64c07ba02b --- .codegen/_openapi_sha | 2 +- databricks/sdk/service/catalog.py | 2 +- databricks/sdk/service/compute.py | 18 +++----- databricks/sdk/service/sql.py | 7 ++- docs/account/iam/workspace_assignment.rst | 8 ++-- docs/account/provisioning/storage.rst | 6 ++- docs/dbdataclasses/catalog.rst | 2 +- docs/workspace/catalog/catalogs.rst | 7 ++- docs/workspace/catalog/external_locations.rst | 13 +++--- docs/workspace/catalog/schemas.rst | 8 ++-- .../workspace/catalog/storage_credentials.rst | 15 +++---- docs/workspace/compute/clusters.rst | 9 ++-- docs/workspace/iam/current_user.rst | 2 +- docs/workspace/iam/permissions.rst | 2 +- docs/workspace/jobs/jobs.rst | 44 ++++++++++++++----- docs/workspace/sql/queries.rst | 2 +- docs/workspace/workspace/workspace.rst | 16 ++++--- 17 files changed, 93 insertions(+), 70 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 6d4ada17b..e6401d777 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -8f5eedbc991c4f04ce1284406577b0c92d59a224 \ No newline at end of file +83ba5645aa5cf53f39008962d6ff4e64c07ba02b \ No newline at end of file diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 1e5276d26..2e5922c01 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -8744,7 +8744,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274""" + """Latest kind: CREDENTIAL_STORAGE_UC_CONNECTION = 275; Next id:276""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 473c7cca1..a6e062229 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -623,8 +623,7 @@ class ClusterAttributes: enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk - space when its Spark workers are running low on disk space. This feature requires specific AWS - permissions to function correctly - refer to the User Guide for more details.""" + space when its Spark workers are running low on disk space.""" enable_local_disk_encryption: Optional[bool] = None """Whether to enable LUKS on cluster VMs' local disks""" @@ -1018,8 +1017,7 @@ class ClusterDetails: enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk - space when its Spark workers are running low on disk space. This feature requires specific AWS - permissions to function correctly - refer to the User Guide for more details.""" + space when its Spark workers are running low on disk space.""" enable_local_disk_encryption: Optional[bool] = None """Whether to enable LUKS on cluster VMs' local disks""" @@ -2083,8 +2081,7 @@ class ClusterSpec: enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk - space when its Spark workers are running low on disk space. 
This feature requires specific AWS - permissions to function correctly - refer to the User Guide for more details.""" + space when its Spark workers are running low on disk space.""" enable_local_disk_encryption: Optional[bool] = None """Whether to enable LUKS on cluster VMs' local disks""" @@ -7341,8 +7338,7 @@ class UpdateClusterResource: enable_elastic_disk: Optional[bool] = None """Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk - space when its Spark workers are running low on disk space. This feature requires specific AWS - permissions to function correctly - refer to the User Guide for more details.""" + space when its Spark workers are running low on disk space.""" enable_local_disk_encryption: Optional[bool] = None """Whether to enable LUKS on cluster VMs' local disks""" @@ -8271,8 +8267,7 @@ def create( node_type_id take precedence. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space - when its Spark workers are running low on disk space. This feature requires specific AWS permissions - to function correctly - refer to the User Guide for more details. + when its Spark workers are running low on disk space. :param enable_local_disk_encryption: bool (optional) Whether to enable LUKS on cluster VMs' local disks :param gcp_attributes: :class:`GcpAttributes` (optional) @@ -8636,8 +8631,7 @@ def edit( node_type_id take precedence. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space - when its Spark workers are running low on disk space. This feature requires specific AWS permissions - to function correctly - refer to the User Guide for more details. + when its Spark workers are running low on disk space. :param enable_local_disk_encryption: bool (optional) Whether to enable LUKS on cluster VMs' local disks :param gcp_attributes: :class:`GcpAttributes` (optional) diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 403f401d3..7f7c69e0b 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -876,8 +876,11 @@ class AlertV2Notification: """Whether to notify alert subscribers when alert returns back to normal.""" retrigger_seconds: Optional[int] = None - """Number of seconds an alert must wait after being triggered to rearm itself. After rearming, it - can be triggered again. If 0 or not specified, the alert will not be triggered again.""" + """Number of seconds an alert waits after being triggered before it is allowed to send another + notification. 
If set to 0 or omitted, the alert will not send any further notifications after + the first trigger Setting this value to 1 allows the alert to send a notification on every + evaluation where the condition is met, effectively making it always retrigger for notification + purposes.""" subscriptions: Optional[List[AlertV2Subscription]] = None diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index ca78b86df..fa9c2ee3e 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - all = a.workspace_assignment.list(list=workspace_id) + all = a.workspace_assignment.list(workspace_id=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. @@ -74,9 +74,9 @@ spn_id = spn.id - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - a.workspace_assignment.update( + _ = a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 25ee5abaa..b9f080e36 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,6 +16,7 @@ .. code-block:: + import os import time from databricks.sdk import AccountClient @@ -25,8 +26,11 @@ storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), ) + + # cleanup + a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index ca2bb65bb..7af7333c8 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -1497,7 +1497,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274 + Latest kind: CREDENTIAL_STORAGE_UC_CONNECTION = 275; Next id:276 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 258f994d3..0b4d11aaa 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) + w.catalogs.delete(name=new_catalog.name, force=True) Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. 
@@ -155,13 +155,12 @@ import time from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) + _ = w.catalogs.update(name=created.name, comment="updated") # cleanup w.catalogs.delete(name=created.name, force=True) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index fdf69e38a..921069e27 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -105,20 +105,20 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) - _ = w.external_locations.get(name=created.name) + _ = w.external_locations.get(get=created.name) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. @@ -140,11 +140,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list(catalog.ListExternalLocationsRequest()) + all = w.external_locations.list() Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index 719d5a156..fd1479c78 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -22,13 +22,13 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") - created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) + created = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=new_catalog.name) # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) - w.schemas.delete(full_name=created_schema.full_name) + w.catalogs.delete(name=new_catalog.name, force=True) + w.schemas.delete(full_name=created.full_name) Creates a new schema for catalog in the Metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog. 
diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 2eacfda5e..e52df07b3 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - created = w.storage_credentials.create( + credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=credential.name) Creates a new storage credential. @@ -123,11 +123,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) + all = w.storage_credentials.list() Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore @@ -173,17 +172,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Updates a storage credential on the metastore. diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index db78626ff..8619a5e9a 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -167,8 +167,7 @@ node_type_id take precedence. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space - when its Spark workers are running low on disk space. This feature requires specific AWS permissions - to function correctly - refer to the User Guide for more details. + when its Spark workers are running low on disk space. :param enable_local_disk_encryption: bool (optional) Whether to enable LUKS on cluster VMs' local disks :param gcp_attributes: :class:`GcpAttributes` (optional) @@ -402,8 +401,7 @@ node_type_id take precedence. :param enable_elastic_disk: bool (optional) Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space - when its Spark workers are running low on disk space. This feature requires specific AWS permissions - to function correctly - refer to the User Guide for more details. + when its Spark workers are running low on disk space. :param enable_local_disk_encryption: bool (optional) Whether to enable LUKS on cluster VMs' local disks :param gcp_attributes: :class:`GcpAttributes` (optional) @@ -647,10 +645,11 @@ .. 
code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import compute w = WorkspaceClient() - nodes = w.clusters.list_node_types() + all = w.clusters.list(compute.ListClustersRequest()) Return information about all pinned and active clusters, and all clusters terminated within the last 30 days. Clusters terminated prior to this period are not included. diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index 2f95213e2..b2390ce63 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me2 = w.current_user.me() + me = w.current_user.me() Get details about the current method caller's identity. diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index ea24afd1a..15524c53e 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -44,7 +44,7 @@ obj = w.workspace.get_status(path=notebook_path) - _ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) + levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) Gets the permissions of an object. Objects can inherit permissions from their parent objects or root object. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index a28fe7aee..c3ff96a4e 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -358,23 +358,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. @@ -523,11 +521,37 @@ .. code-block:: + import os + import time + from databricks.sdk import WorkspaceClient + from databricks.sdk.service import jobs w = WorkspaceClient() - job_list = w.jobs.list(expand_tasks=False) + notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + + cluster_id = ( + w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] + ) + + created_job = w.jobs.create( + name=f"sdk-{time.time_ns()}", + tasks=[ + jobs.Task( + description="test", + existing_cluster_id=cluster_id, + notebook_task=jobs.NotebookTask(notebook_path=notebook_path), + task_key="test", + timeout_seconds=0, + ) + ], + ) + + run_list = w.jobs.list_runs(job_id=created_job.job_id) + + # cleanup + w.jobs.delete(job_id=created_job.job_id) List jobs. 
diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index f0081b3f2..0dfb63fbf 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SHOW TABLES", + query_text="SELECT 1", ) ) diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index aaccfb1c4..8942147de 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -148,9 +148,9 @@ w = WorkspaceClient() - notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - obj = w.workspace.get_status(path=notebook_path) + get_status_response = w.workspace.get_status(path=notebook) Gets the status of an object or a directory. If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. @@ -182,7 +182,7 @@ content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), format=workspace.ImportFormat.SOURCE, language=workspace.Language.SQL, - overwrite=true_, + overwrite=True, path=notebook_path, ) @@ -227,14 +227,16 @@ .. code-block:: + import os + import time + from databricks.sdk import WorkspaceClient w = WorkspaceClient() - names = [] - for i in w.workspace.list(f"/Users/{w.current_user.me().user_name}", recursive=True): - names.append(i.path) - assert len(names) > 0 + notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + + objects = w.workspace.list(path=os.path.dirname(notebook)) List workspace objects
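
Several hunks above trim the enable_elastic_disk docstring down to its core meaning: autoscaling local storage that acquires extra disk when Spark workers run low. The following sketch is not part of the generated patch; it shows one way that flag might be passed to clusters.create, assuming the usual Databricks auth environment and the SDK's select_spark_version and select_node_type helpers.

    import time

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Create a small cluster that dynamically acquires extra disk space
    # (autoscaling local storage) when its Spark workers run low on disk.
    cluster = w.clusters.create(
        cluster_name=f"sdk-{time.time_ns()}",
        spark_version=w.clusters.select_spark_version(latest=True),
        node_type_id=w.clusters.select_node_type(local_disk=True),
        num_workers=1,
        autotermination_minutes=15,
        enable_elastic_disk=True,
    ).result()

    # cleanup
    w.clusters.permanent_delete(cluster_id=cluster.cluster_id)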
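
The reworded AlertV2Notification.retrigger_seconds docstring is the main behavioural clarification in sql.py. A minimal sketch of the three cases it describes, built only from the dataclass this patch touches; wiring these settings into an actual alert via the Alerts V2 API is left out.

    from databricks.sdk.service import sql

    # retrigger_seconds=0 (or omitted): notify only on the first trigger,
    # with no further notifications afterwards.
    notify_once = sql.AlertV2Notification(notify_on_ok=True, retrigger_seconds=0)

    # retrigger_seconds=1: effectively notify on every evaluation where the
    # alert condition is met.
    notify_every_evaluation = sql.AlertV2Notification(notify_on_ok=True, retrigger_seconds=1)

    # Any larger value: wait that many seconds after a notification before
    # another one may be sent.
    notify_at_most_hourly = sql.AlertV2Notification(notify_on_ok=True, retrigger_seconds=3600)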
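
The workspace_assignment examples above now pass workspace_id as a keyword argument and read DUMMY_WORKSPACE_ID. A condensed, illustrative version of the same list and update flow follows; the SOME_PRINCIPAL_ID environment variable is a placeholder introduced here, not something the patch defines.

    import os

    from databricks.sdk import AccountClient
    from databricks.sdk.service import iam

    a = AccountClient()

    workspace_id = int(os.environ["DUMMY_WORKSPACE_ID"])

    # List the current permission assignments for the workspace.
    for assignment in a.workspace_assignment.list(workspace_id=workspace_id):
        print(assignment.principal, assignment.permissions)

    # Grant workspace USER permissions to a principal (user, group or service
    # principal). SOME_PRINCIPAL_ID stands in for an id obtained elsewhere.
    principal_id = int(os.environ["SOME_PRINCIPAL_ID"])

    _ = a.workspace_assignment.update(
        workspace_id=workspace_id,
        principal_id=principal_id,
        permissions=[iam.WorkspacePermission.USER],
    )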