diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index b5ddb9b7a..7a9cd634a 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-f21f4933da405cac4bc77c9732044dc45b4f0c5a
\ No newline at end of file
+9b38571bfe7bf0bc595480f28eb93a8db3116985
\ No newline at end of file
diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index 02822d4ea..7d0079d78 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -15,4 +15,4 @@
 ### API Changes
 * Add `table_deltasharing_open_dir_based` enum value for `databricks.sdk.service.catalog.SecurableKind`.
 * Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.NccPrivateEndpointRulePrivateLinkConnectionState`.
-* [Breaking] Remove `access_modes` and `storage_location` fields for `databricks.sdk.service.sharing.Table`.
\ No newline at end of file
+* [Breaking] Remove `access_modes` and `storage_location` fields for `databricks.sdk.service.sharing.Table`.
diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py
index 15c1d9ba1..cfa11ecd3 100644
--- a/databricks/sdk/service/catalog.py
+++ b/databricks/sdk/service/catalog.py
@@ -8784,7 +8784,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable:
 
 
 class SecurableKind(Enum):
-    """Latest kind: TABLE_DELTASHARING_OPEN_DIR_BASED = 290; Next id:291"""
+    """Latest kind: CONNECTION_WORKDAY_HCM_USERNAME_PASSWORD = 292; Next id: 293"""
 
     TABLE_DB_STORAGE = "TABLE_DB_STORAGE"
     TABLE_DELTA = "TABLE_DELTA"
diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py
index b83a73731..46f6768a8 100644
--- a/databricks/sdk/service/jobs.py
+++ b/databricks/sdk/service/jobs.py
@@ -9444,6 +9444,12 @@ def submit(
         Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the
         run state after the job is submitted.
 
+        **Important:** Jobs submitted using this endpoint are not saved as a job. They do not show up in the
+        Jobs UI, and do not retry when they fail. Because they are not saved, Databricks cannot auto-optimize
+        serverless compute in case of failure. If your job fails, you may want to use classic compute to
+        specify the compute needs for the job. Alternatively, use the `POST /jobs/create` and `POST
+        /jobs/run-now` endpoints to create and run a saved job.
+
         :param access_control_list: List[:class:`JobAccessControlRequest`] (optional)
           List of permissions to set on the job.
         :param budget_policy_id: str (optional)
diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py
index ebae1d89f..e9263267b 100644
--- a/databricks/sdk/service/settings.py
+++ b/databricks/sdk/service/settings.py
@@ -4025,7 +4025,10 @@ class NccPrivateEndpointRule:
     and is ready to use in your serverless compute resources. - REJECTED: Connection was rejected by the
     private link resource owner. - DISCONNECTED: Connection was removed by the private link resource owner,
     the private endpoint becomes informative and should be deleted for clean-up. -
-    EXPIRED: If the endpoint was created but not approved in 14 days, it will be EXPIRED."""
+    EXPIRED: If the endpoint was created but not approved in 14 days, it will be EXPIRED. -
+    CREATING: The endpoint creation is in progress. Once successfully created, the state will
+    transition to PENDING. - CREATE_FAILED: The endpoint creation failed.
+    You can check the error_message field for more details."""
 
     creation_time: Optional[int] = None
     """Time in epoch milliseconds when this object was created."""
diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py
index 6074f1428..b2e7b48fb 100644
--- a/databricks/sdk/service/sharing.py
+++ b/databricks/sdk/service/sharing.py
@@ -2648,9 +2648,10 @@ def list(
         max_results: Optional[int] = None,
         page_token: Optional[str] = None,
     ) -> Iterator[ProviderInfo]:
-        """Gets an array of available authentication providers. The caller must either be a metastore admin or
-        the owner of the providers. Providers not owned by the caller are not included in the response. There
-        is no guarantee of a specific ordering of the elements in the array.
+        """Gets an array of available authentication providers. The caller must either be a metastore admin, have
+        the **USE_PROVIDER** privilege on the providers, or be the owner of the providers. Providers not owned
+        by the caller and for which the caller does not have the **USE_PROVIDER** privilege are not included
+        in the response. There is no guarantee of a specific ordering of the elements in the array.
 
         :param data_provider_global_metastore_id: str (optional)
           If not provided, all providers will be returned. If no providers exist with this ID, no results will
diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst
index 2a8043172..133b16f3d 100644
--- a/docs/account/iam/workspace_assignment.rst
+++ b/docs/account/iam/workspace_assignment.rst
@@ -43,9 +43,9 @@
 
             a = AccountClient()
 
-            workspace_id = os.environ["DUMMY_WORKSPACE_ID"]
+            workspace_id = os.environ["TEST_WORKSPACE_ID"]
 
-            all = a.workspace_assignment.list(workspace_id=workspace_id)
+            all = a.workspace_assignment.list(list=workspace_id)
 
         Get the permission assignments for the specified Databricks account and Databricks workspace.
 
@@ -74,9 +74,9 @@
 
             spn_id = spn.id
 
-            workspace_id = os.environ["TEST_WORKSPACE_ID"]
+            workspace_id = os.environ["DUMMY_WORKSPACE_ID"]
 
-            a.workspace_assignment.update(
+            _ = a.workspace_assignment.update(
                 workspace_id=workspace_id,
                 principal_id=spn_id,
                 permissions=[iam.WorkspacePermission.USER],
diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst
index b71c1707e..d63648d58 100644
--- a/docs/account/provisioning/credentials.rst
+++ b/docs/account/provisioning/credentials.rst
@@ -24,15 +24,15 @@
 
             a = AccountClient()
 
-            creds = a.credentials.create(
+            role = a.credentials.create(
                 credentials_name=f"sdk-{time.time_ns()}",
                 aws_credentials=provisioning.CreateCredentialAwsCredentials(
-                    sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"])
+                    sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"])
                 ),
             )
 
             # cleanup
-            a.credentials.delete(credentials_id=creds.credentials_id)
+            a.credentials.delete(credentials_id=role.credentials_id)
 
         Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account.
         Databricks uses this to set up network infrastructure properly to host Databricks
diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst
index 41a04deb3..25ee5abaa 100644
--- a/docs/account/provisioning/storage.rst
+++ b/docs/account/provisioning/storage.rst
@@ -23,13 +23,10 @@
 
             a = AccountClient()
 
-            bucket = a.storage.create(
+            storage = a.storage.create(
                 storage_configuration_name=f"sdk-{time.time_ns()}",
                 root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"),
             )
-
-            # cleanup
-            a.storage.delete(storage_configuration_id=bucket.storage_configuration_id)
 
         Creates a Databricks storage configuration for an account.
 
diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst
index a0435d055..96f5c9108 100644
--- a/docs/dbdataclasses/catalog.rst
+++ b/docs/dbdataclasses/catalog.rst
@@ -1497,7 +1497,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo
 
 .. py:class:: SecurableKind
 
-   Latest kind: TABLE_DELTASHARING_OPEN_DIR_BASED = 290; Next id:291
+   Latest kind: CONNECTION_WORKDAY_HCM_USERNAME_PASSWORD = 292; Next id: 293
 
    .. py:attribute:: TABLE_DB_STORAGE
       :value: "TABLE_DB_STORAGE"
diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst
index 17297d8dd..77de87dc4 100644
--- a/docs/workspace/catalog/catalogs.rst
+++ b/docs/workspace/catalog/catalogs.rst
@@ -24,10 +24,10 @@
 
             w = WorkspaceClient()
 
-            created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")
+            created = w.catalogs.create(name=f"sdk-{time.time_ns()}")
 
             # cleanup
-            w.catalogs.delete(name=created_catalog.name, force=True)
+            w.catalogs.delete(name=created.name, force=True)
 
         Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the
         **CREATE_CATALOG** privilege.
diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst
index 2134ea9c4..ad6e4ebe5 100644
--- a/docs/workspace/catalog/storage_credentials.rst
+++ b/docs/workspace/catalog/storage_credentials.rst
@@ -30,13 +30,13 @@
 
             w = WorkspaceClient()
 
-            credential = w.storage_credentials.create(
+            created = w.storage_credentials.create(
                 name=f"sdk-{time.time_ns()}",
-                aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+                aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
             )
 
             # cleanup
-            w.storage_credentials.delete(name=credential.name)
+            w.storage_credentials.delete(delete=created.name)
 
         Creates a new storage credential.
 
@@ -172,17 +172,17 @@
 
             created = w.storage_credentials.create(
                 name=f"sdk-{time.time_ns()}",
-                aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+                aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
             )
 
             _ = w.storage_credentials.update(
                 name=created.name,
                 comment=f"sdk-{time.time_ns()}",
-                aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+                aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
             )
 
             # cleanup
-            w.storage_credentials.delete(name=created.name)
+            w.storage_credentials.delete(delete=created.name)
 
         Updates a storage credential on the metastore.
 
diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst
index 009b4bbd2..089b1b7f1 100644
--- a/docs/workspace/catalog/tables.rst
+++ b/docs/workspace/catalog/tables.rst
@@ -156,7 +156,7 @@
 
             created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name)
 
-            summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name)
+            all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name)
 
             # cleanup
             w.schemas.delete(full_name=created_schema.full_name)
diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst
index 463e34d0a..8619a5e9a 100644
--- a/docs/workspace/compute/clusters.rst
+++ b/docs/workspace/compute/clusters.rst
@@ -645,10 +645,11 @@
         .. code-block::
 
             from databricks.sdk import WorkspaceClient
+            from databricks.sdk.service import compute
 
             w = WorkspaceClient()
 
-            nodes = w.clusters.list_node_types()
+            all = w.clusters.list(compute.ListClustersRequest())
 
         Return information about all pinned and active clusters, and all clusters terminated within the last
         30 days. Clusters terminated prior to this period are not included.
diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst
index b2390ce63..2f95213e2 100644
--- a/docs/workspace/iam/current_user.rst
+++ b/docs/workspace/iam/current_user.rst
@@ -17,7 +17,7 @@
 
             w = WorkspaceClient()
 
-            me = w.current_user.me()
+            me2 = w.current_user.me()
 
         Get details about the current method caller's identity.
 
diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst
index 15524c53e..ea24afd1a 100644
--- a/docs/workspace/iam/permissions.rst
+++ b/docs/workspace/iam/permissions.rst
@@ -44,7 +44,7 @@
 
             obj = w.workspace.get_status(path=notebook_path)
 
-            levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id))
+            _ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id))
 
         Gets the permissions of an object. Objects can inherit permissions from their parent objects or root
         object.
diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst
index 6fb32b493..224a89722 100644
--- a/docs/workspace/jobs/jobs.rst
+++ b/docs/workspace/jobs/jobs.rst
@@ -1075,6 +1075,12 @@
         Runs submitted using this endpoint don’t display in the UI. Use the `jobs/runs/get` API to check the
         run state after the job is submitted.
 
+        **Important:** Jobs submitted using this endpoint are not saved as a job. They do not show up in the
+        Jobs UI, and do not retry when they fail. Because they are not saved, Databricks cannot auto-optimize
+        serverless compute in case of failure. If your job fails, you may want to use classic compute to
+        specify the compute needs for the job. Alternatively, use the `POST /jobs/create` and `POST
+        /jobs/run-now` endpoints to create and run a saved job.
+
         :param access_control_list: List[:class:`JobAccessControlRequest`] (optional)
           List of permissions to set on the job.
         :param budget_policy_id: str (optional)
diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst
index 9a6c8f286..e416ac56b 100644
--- a/docs/workspace/ml/model_registry.rst
+++ b/docs/workspace/ml/model_registry.rst
@@ -91,6 +91,8 @@
             w = WorkspaceClient()
 
             model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}")
+
+            mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp")
 
         Creates a new registered model with the name specified in the request body. Throws
         `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists.
@@ -120,7 +122,7 @@
 
             model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}")
 
-            created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp")
+            mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp")
 
         Creates a model version.
 
diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst
index 1a7c88de9..302039578 100644
--- a/docs/workspace/sharing/providers.rst
+++ b/docs/workspace/sharing/providers.rst
@@ -101,29 +101,17 @@
 
         .. code-block::
 
-            import time
-
             from databricks.sdk import WorkspaceClient
+            from databricks.sdk.service import sharing
 
             w = WorkspaceClient()
 
-            public_share_recipient = """{
-                "shareCredentialsVersion":1,
-                "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz",
-                "endpoint":"https://sharing.delta.io/delta-sharing/"
-            }
-            """
-
-            created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient)
-
-            shares = w.providers.list_shares(name=created.name)
-
-            # cleanup
-            w.providers.delete(name=created.name)
+            all = w.providers.list(sharing.ListProvidersRequest())
 
-        Gets an array of available authentication providers. The caller must either be a metastore admin or
-        the owner of the providers. Providers not owned by the caller are not included in the response. There
-        is no guarantee of a specific ordering of the elements in the array.
+        Gets an array of available authentication providers. The caller must either be a metastore admin, have
+        the **USE_PROVIDER** privilege on the providers, or be the owner of the providers. Providers not owned
+        by the caller and for which the caller does not have the **USE_PROVIDER** privilege are not included
+        in the response. There is no guarantee of a specific ordering of the elements in the array.
 
         :param data_provider_global_metastore_id: str (optional)
           If not provided, all providers will be returned. If no providers exist with this ID, no results will
diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst
index 521f3b97f..694026c28 100644
--- a/docs/workspace/workspace/workspace.rst
+++ b/docs/workspace/workspace/workspace.rst
@@ -79,7 +79,7 @@
 
             notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}"
 
-            export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook)
+            export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook)
 
         Exports an object or the contents of an entire directory.
 
@@ -180,7 +180,7 @@
 
             w.workspace.import_(
                 path=notebook_path,
-                overwrite=true_,
+                overwrite=True,
                 format=workspace.ImportFormat.SOURCE,
                 language=workspace.Language.PYTHON,
                 content=base64.b64encode(
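The docstring added to `submit()` in this diff steers callers who need retries or serverless auto-optimization toward the `jobs/create` and `jobs/run-now` endpoints instead of one-off submitted runs. A minimal sketch of that create-and-run flow with the Python SDK follows; the job name, notebook path, and cluster ID are illustrative assumptions, not values taken from this change.

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    # POST /jobs/create: the job is saved, appears in the Jobs UI, and can be configured to retry.
    created = w.jobs.create(
        name="example-saved-job",  # illustrative name
        tasks=[
            jobs.Task(
                task_key="main",
                notebook_task=jobs.NotebookTask(notebook_path="/Users/someone@example.com/example-notebook"),
                existing_cluster_id="0000-000000-example",  # illustrative cluster ID
            )
        ],
    )

    # POST /jobs/run-now: trigger a run of the saved job and wait for it to finish.
    run = w.jobs.run_now(job_id=created.job_id).result()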