2 changes: 1 addition & 1 deletion .codegen/_openapi_sha
@@ -1 +1 @@
-8f5eedbc991c4f04ce1284406577b0c92d59a224
+83ba5645aa5cf53f39008962d6ff4e64c07ba02b
2 changes: 1 addition & 1 deletion databricks/sdk/service/catalog.py

Some generated files are not rendered by default.

18 changes: 6 additions & 12 deletions databricks/sdk/service/compute.py

Some generated files are not rendered by default.

7 changes: 5 additions & 2 deletions databricks/sdk/service/sql.py

Some generated files are not rendered by default.

8 changes: 4 additions & 4 deletions docs/account/iam/workspace_assignment.rst
@@ -43,9 +43,9 @@

a = AccountClient()

workspace_id = os.environ["TEST_WORKSPACE_ID"]
workspace_id = os.environ["DUMMY_WORKSPACE_ID"]

-all = a.workspace_assignment.list(list=workspace_id)
+all = a.workspace_assignment.list(workspace_id=workspace_id)

Get the permission assignments for the specified Databricks account and Databricks workspace.

@@ -74,9 +74,9 @@

spn_id = spn.id

workspace_id = os.environ["TEST_WORKSPACE_ID"]
workspace_id = os.environ["DUMMY_WORKSPACE_ID"]

-a.workspace_assignment.update(
+_ = a.workspace_assignment.update(
workspace_id=workspace_id,
principal_id=spn_id,
permissions=[iam.WorkspacePermission.USER],
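
Read together, the two hunks above leave the list example looking roughly as follows; a minimal sketch of the post-change snippet, assuming account-level auth is configured and DUMMY_WORKSPACE_ID is set in the environment:

import os

from databricks.sdk import AccountClient

a = AccountClient()

# workspace whose permission assignments we want; env var assumed set
workspace_id = os.environ["DUMMY_WORKSPACE_ID"]

# list permission assignments for that workspace
all = a.workspace_assignment.list(workspace_id=workspace_id)
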
6 changes: 5 additions & 1 deletion docs/account/provisioning/storage.rst
@@ -16,6 +16,7 @@

.. code-block::

+import os
import time

from databricks.sdk import AccountClient
@@ -25,8 +26,11 @@

storage = a.storage.create(
storage_configuration_name=f"sdk-{time.time_ns()}",
-root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"),
+root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]),
)

+# cleanup
+a.storage.delete(storage_configuration_id=storage.storage_configuration_id)

Creates a Databricks storage configuration for an account.

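
With the new import and cleanup lines applied, the whole example reads roughly as below; a sketch that assumes the provisioning import from the unexpanded context above and a TEST_ROOT_BUCKET env var naming an existing S3 bucket:

import os
import time

from databricks.sdk import AccountClient
from databricks.sdk.service import provisioning

a = AccountClient()

# create a storage configuration backed by an existing bucket
storage = a.storage.create(
    storage_configuration_name=f"sdk-{time.time_ns()}",
    root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]),
)

# cleanup
a.storage.delete(storage_configuration_id=storage.storage_configuration_id)
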
2 changes: 1 addition & 1 deletion docs/dbdataclasses/catalog.rst
@@ -1497,7 +1497,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo

.. py:class:: SecurableKind
-Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274
+Latest kind: CREDENTIAL_STORAGE_UC_CONNECTION = 275; Next id:276

.. py:attribute:: TABLE_DB_STORAGE
:value: "TABLE_DB_STORAGE"
7 changes: 3 additions & 4 deletions docs/workspace/catalog/catalogs.rst
@@ -24,10 +24,10 @@

w = WorkspaceClient()

-created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")
+new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")

# cleanup
-w.catalogs.delete(name=created_catalog.name, force=True)
+w.catalogs.delete(name=new_catalog.name, force=True)

Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the
**CREATE_CATALOG** privilege.
@@ -155,13 +155,12 @@
import time

from databricks.sdk import WorkspaceClient
-from databricks.sdk.service import catalog

w = WorkspaceClient()

created = w.catalogs.create(name=f"sdk-{time.time_ns()}")

-_ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED)
+_ = w.catalogs.update(name=created.name, comment="updated")

# cleanup
w.catalogs.delete(name=created.name, force=True)
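
After this change the update example no longer needs the catalog service import; a sketch of the resulting snippet:

import time

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

created = w.catalogs.create(name=f"sdk-{time.time_ns()}")

# update a mutable field on the catalog; here, only the comment
_ = w.catalogs.update(name=created.name, comment="updated")

# cleanup
w.catalogs.delete(name=created.name, force=True)
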
13 changes: 6 additions & 7 deletions docs/workspace/catalog/external_locations.rst
@@ -105,20 +105,20 @@

credential = w.storage_credentials.create(
name=f"sdk-{time.time_ns()}",
-aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
)

created = w.external_locations.create(
name=f"sdk-{time.time_ns()}",
credential_name=credential.name,
url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"),
url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}',
)

-_ = w.external_locations.get(name=created.name)
+_ = w.external_locations.get(get=created.name)

# cleanup
-w.storage_credentials.delete(name=credential.name)
-w.external_locations.delete(name=created.name)
+w.storage_credentials.delete(delete=credential.name)
+w.external_locations.delete(delete=created.name)

Gets an external location from the metastore. The caller must be either a metastore admin, the owner
of the external location, or a user that has some privilege on the external location.
@@ -140,11 +140,10 @@
.. code-block::

from databricks.sdk import WorkspaceClient
-from databricks.sdk.service import catalog

w = WorkspaceClient()

-all = w.external_locations.list(catalog.ListExternalLocationsRequest())
+all = w.external_locations.list()

Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller
must be a metastore admin, the owner of the external location, or a user that has some privilege on
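
The list example drops its request-object argument entirely; after the change it is simply (a sketch, mirroring the new generated example):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# all external locations the caller has some privilege on
all = w.external_locations.list()
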
8 changes: 4 additions & 4 deletions docs/workspace/catalog/schemas.rst
@@ -22,13 +22,13 @@

w = WorkspaceClient()

-created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")
+new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")

-created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name)
+created = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=new_catalog.name)

# cleanup
-w.catalogs.delete(name=created_catalog.name, force=True)
-w.schemas.delete(full_name=created_schema.full_name)
+w.catalogs.delete(name=new_catalog.name, force=True)
+w.schemas.delete(full_name=created.full_name)

Creates a new schema for catalog in the Metastore. The caller must be a metastore admin, or have the
**CREATE_SCHEMA** privilege in the parent catalog.
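
Assembled, the renamed example reads roughly as below. The cleanup is reordered in this sketch to delete the schema before force-deleting its parent catalog, since the generated order would try to drop a schema whose catalog is already gone:

import time

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}")

created = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=new_catalog.name)

# cleanup (schema first, then its parent catalog)
w.schemas.delete(full_name=created.full_name)
w.catalogs.delete(name=new_catalog.name, force=True)
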
15 changes: 7 additions & 8 deletions docs/workspace/catalog/storage_credentials.rst
@@ -30,13 +30,13 @@

w = WorkspaceClient()

-created = w.storage_credentials.create(
+credential = w.storage_credentials.create(
name=f"sdk-{time.time_ns()}",
-aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
)

# cleanup
-w.storage_credentials.delete(delete=created.name)
+w.storage_credentials.delete(name=credential.name)

Creates a new storage credential.

@@ -123,11 +123,10 @@
.. code-block::

from databricks.sdk import WorkspaceClient
-from databricks.sdk.service import catalog

w = WorkspaceClient()

-all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest())
+all = w.storage_credentials.list()

Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to
only those storage credentials the caller has permission to access. If the caller is a metastore
@@ -173,17 +172,17 @@

created = w.storage_credentials.create(
name=f"sdk-{time.time_ns()}",
-aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
)

_ = w.storage_credentials.update(
name=created.name,
comment=f"sdk-{time.time_ns()}",
-aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
+aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
)

# cleanup
-w.storage_credentials.delete(delete=created.name)
+w.storage_credentials.delete(name=created.name)

Updates a storage credential on the metastore.

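
A sketch of the create example after the rename to credential and the switch to AwsIamRoleRequest; TEST_METASTORE_DATA_ACCESS_ARN is assumed to point at an IAM role the metastore can assume:

import os
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import catalog

w = WorkspaceClient()

credential = w.storage_credentials.create(
    name=f"sdk-{time.time_ns()}",
    aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
)

# cleanup
w.storage_credentials.delete(name=credential.name)
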
9 changes: 4 additions & 5 deletions docs/workspace/compute/clusters.rst
@@ -167,8 +167,7 @@
node_type_id take precedence.
:param enable_elastic_disk: bool (optional)
Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
-when its Spark workers are running low on disk space. This feature requires specific AWS permissions
-to function correctly - refer to the User Guide for more details.
+when its Spark workers are running low on disk space.
:param enable_local_disk_encryption: bool (optional)
Whether to enable LUKS on cluster VMs' local disks
:param gcp_attributes: :class:`GcpAttributes` (optional)
@@ -402,8 +401,7 @@
node_type_id take precedence.
:param enable_elastic_disk: bool (optional)
Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space
-when its Spark workers are running low on disk space. This feature requires specific AWS permissions
-to function correctly - refer to the User Guide for more details.
+when its Spark workers are running low on disk space.
:param enable_local_disk_encryption: bool (optional)
Whether to enable LUKS on cluster VMs' local disks
:param gcp_attributes: :class:`GcpAttributes` (optional)
@@ -647,10 +645,11 @@
.. code-block::

from databricks.sdk import WorkspaceClient
+from databricks.sdk.service import compute

w = WorkspaceClient()

-nodes = w.clusters.list_node_types()
+all = w.clusters.list(compute.ListClustersRequest())

Return information about all pinned and active clusters, and all clusters terminated within the last
30 days. Clusters terminated prior to this period are not included.
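
The list example now exercises clusters.list through the generated request dataclass instead of calling list_node_types; a sketch mirroring the new generated example:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import compute

w = WorkspaceClient()

# pinned and active clusters, plus clusters terminated within the last 30 days
all = w.clusters.list(compute.ListClustersRequest())
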
2 changes: 1 addition & 1 deletion docs/workspace/iam/current_user.rst
@@ -17,7 +17,7 @@

w = WorkspaceClient()

-me2 = w.current_user.me()
+me = w.current_user.me()

Get details about the current method caller's identity.

2 changes: 1 addition & 1 deletion docs/workspace/iam/permissions.rst
@@ -44,7 +44,7 @@

obj = w.workspace.get_status(path=notebook_path)

-_ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id))
+levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id))

Gets the permissions of an object. Objects can inherit permissions from their parent objects or root
object.
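
The example switches from permissions.get to get_permission_levels; a sketch of the call after the change, where notebook_path is a hypothetical stand-in for the existing notebook created in the unexpanded setup above:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# hypothetical path; the real example creates this notebook first
notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-example"

obj = w.workspace.get_status(path=notebook_path)

# permission levels that can be granted on this notebook
levels = w.permissions.get_permission_levels(
    request_object_type="notebooks",
    request_object_id="%d" % (obj.object_id),
)
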
44 changes: 34 additions & 10 deletions docs/workspace/jobs/jobs.rst
@@ -358,23 +358,21 @@
w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"]
)

-created_job = w.jobs.create(
-name=f"sdk-{time.time_ns()}",
+run = w.jobs.submit(
+run_name=f"sdk-{time.time_ns()}",
tasks=[
-jobs.Task(
-description="test",
+jobs.SubmitTask(
existing_cluster_id=cluster_id,
notebook_task=jobs.NotebookTask(notebook_path=notebook_path),
task_key="test",
timeout_seconds=0,
task_key=f"sdk-{time.time_ns()}",
)
],
-)
+).result()

-by_id = w.jobs.get(job_id=created_job.job_id)
+output = w.jobs.get_run_output(run_id=run.tasks[0].run_id)

# cleanup
-w.jobs.delete(job_id=created_job.job_id)
+w.jobs.delete_run(run_id=run.run_id)

Get a single job.

@@ -523,11 +521,37 @@

.. code-block::

+import os
+import time

from databricks.sdk import WorkspaceClient
+from databricks.sdk.service import jobs

w = WorkspaceClient()

-job_list = w.jobs.list(expand_tasks=False)
+notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}"

+cluster_id = (
+w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"]
+)

+created_job = w.jobs.create(
+name=f"sdk-{time.time_ns()}",
+tasks=[
+jobs.Task(
+description="test",
+existing_cluster_id=cluster_id,
+notebook_task=jobs.NotebookTask(notebook_path=notebook_path),
+task_key="test",
+timeout_seconds=0,
+)
+],
+)

+run_list = w.jobs.list_runs(job_id=created_job.job_id)

+# cleanup
+w.jobs.delete(job_id=created_job.job_id)

List jobs.

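
Assembled from the first hunk, the one-time-run example now reads roughly as follows; a sketch assuming DATABRICKS_CLUSTER_ID references a startable cluster and that a notebook already exists at notebook_path (the real example uploads one in the unexpanded setup):

import os
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}"

cluster_id = (
    w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"]
)

# submit a one-time run and block until it finishes
run = w.jobs.submit(
    run_name=f"sdk-{time.time_ns()}",
    tasks=[
        jobs.SubmitTask(
            existing_cluster_id=cluster_id,
            notebook_task=jobs.NotebookTask(notebook_path=notebook_path),
            task_key=f"sdk-{time.time_ns()}",
        )
    ],
).result()

# fetch the notebook task's output
output = w.jobs.get_run_output(run_id=run.tasks[0].run_id)

# cleanup
w.jobs.delete_run(run_id=run.run_id)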