diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 62eb1dbba..9a25b980c 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -69902d1abe35bd9e78e0231927bf14d11b383a16 \ No newline at end of file +129063c55cb0cf4bda0d561f0bdb7e77d00b9df6 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3bfcfa2e1..67bb57ca4 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1,7 +1,7 @@ databricks/sdk/__init__.py linguist-generated=true databricks/sdk/errors/overrides.py linguist-generated=true databricks/sdk/errors/platform.py linguist-generated=true -databricks/sdk/service/aibuilder.py linguist-generated=true +databricks/sdk/service/agentbricks.py linguist-generated=true databricks/sdk/service/apps.py linguist-generated=true databricks/sdk/service/billing.py linguist-generated=true databricks/sdk/service/catalog.py linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index e01c87b28..27b159f6f 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -18,3 +18,20 @@ - Refactor unit tests for `FilesExt` to improve its readability. ### API Changes +* Added `databricks.sdk.service.agentbricks` package. +* Added `provisioning_phase` field for `databricks.sdk.service.database.SyncedTablePipelineProgress`. +* Added `redshift` and `sqldw` enum values for `databricks.sdk.service.pipelines.IngestionSourceType`. +* Added `germany_c5` enum value for `databricks.sdk.service.settings.ComplianceStandard`. +* Changed `asset_type` and `name` fields for `databricks.sdk.service.cleanrooms.CleanRoomAsset` to be required. +* [Breaking] Changed `asset_type` and `name` fields for `databricks.sdk.service.cleanrooms.CleanRoomAsset` to be required. +* [Breaking] Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetForeignTableLocalDetails` to be required. +* Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetForeignTableLocalDetails` to be required. +* Changed `notebook_content` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetNotebook` to be required. +* [Breaking] Changed `notebook_content` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetNotebook` to be required. +* Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetTableLocalDetails` to be required. +* [Breaking] Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetTableLocalDetails` to be required. +* [Breaking] Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetViewLocalDetails` to be required. +* Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetViewLocalDetails` to be required. +* [Breaking] Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetVolumeLocalDetails` to be required. +* Changed `local_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAssetVolumeLocalDetails` to be required. +* [Breaking] Removed `databricks.sdk.service.aibuilder` package. 
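To illustrate the now-required `CleanRoomAsset` fields listed above, here is a minimal sketch of how calling code might construct a notebook asset after this change. The clean room name, asset name, and notebook content are placeholders, and `w.clean_room_assets` is assumed to be the corresponding `WorkspaceClient` accessor.

```python
import base64

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import cleanrooms

w = WorkspaceClient()

# name, asset_type and notebook_content are now required (previously optional).
asset = cleanrooms.CleanRoomAsset(
    name="my_analysis_notebook",  # placeholder; for notebooks this is the notebook file name
    asset_type=cleanrooms.CleanRoomAssetAssetType.NOTEBOOK_FILE,
    notebook=cleanrooms.CleanRoomAssetNotebook(
        # placeholder content; the API expects a base64-encoded HTML export of the notebook
        notebook_content=base64.b64encode(b"<html>...</html>").decode(),
    ),
)
created = w.clean_room_assets.create(clean_room_name="my-clean-room", asset=asset)
```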
diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index deb1f7785..43f696256 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -13,7 +13,7 @@ from databricks.sdk.mixins.jobs import JobsExt from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt from databricks.sdk.mixins.workspace import WorkspaceExt -from databricks.sdk.service import aibuilder as pkg_aibuilder +from databricks.sdk.service import agentbricks as pkg_agentbricks from databricks.sdk.service import apps as pkg_apps from databricks.sdk.service import billing as pkg_billing from databricks.sdk.service import catalog as pkg_catalog @@ -36,7 +36,7 @@ from databricks.sdk.service import sql as pkg_sql from databricks.sdk.service import vectorsearch as pkg_vectorsearch from databricks.sdk.service import workspace as pkg_workspace -from databricks.sdk.service.aibuilder import AiBuilderAPI +from databricks.sdk.service.agentbricks import AgentBricksAPI from databricks.sdk.service.apps import AppsAPI from databricks.sdk.service.billing import (BillableUsageAPI, BudgetPolicyAPI, BudgetsAPI, LogDeliveryAPI, @@ -240,7 +240,7 @@ def __init__( serving_endpoints = ServingEndpointsExt(self._api_client) self._access_control = pkg_iam.AccessControlAPI(self._api_client) self._account_access_control_proxy = pkg_iam.AccountAccessControlProxyAPI(self._api_client) - self._ai_builder = pkg_aibuilder.AiBuilderAPI(self._api_client) + self._agent_bricks = pkg_agentbricks.AgentBricksAPI(self._api_client) self._alerts = pkg_sql.AlertsAPI(self._api_client) self._alerts_legacy = pkg_sql.AlertsLegacyAPI(self._api_client) self._alerts_v2 = pkg_sql.AlertsV2API(self._api_client) @@ -377,9 +377,9 @@ def account_access_control_proxy(self) -> pkg_iam.AccountAccessControlProxyAPI: return self._account_access_control_proxy @property - def ai_builder(self) -> pkg_aibuilder.AiBuilderAPI: + def agent_bricks(self) -> pkg_agentbricks.AgentBricksAPI: """The Custom LLMs service manages state and powers the UI for the Custom LLM product.""" - return self._ai_builder + return self._agent_bricks @property def alerts(self) -> pkg_sql.AlertsAPI: diff --git a/databricks/sdk/oidc.py b/databricks/sdk/oidc.py index 9f39e3d72..c90313a4c 100644 --- a/databricks/sdk/oidc.py +++ b/databricks/sdk/oidc.py @@ -195,7 +195,7 @@ def token(self) -> oauth.Token: def _exchange_id_token(self, id_token: IdToken) -> oauth.Token: client = oauth.ClientCredentials( client_id=self._client_id, - client_secret="", + client_secret="", # there are no (rotatable) secrets in the OIDC flow token_url=self._token_endpoint, endpoint_params={ "subject_token_type": "urn:ietf:params:oauth:token-type:jwt", diff --git a/databricks/sdk/service/aibuilder.py b/databricks/sdk/service/agentbricks.py similarity index 98% rename from databricks/sdk/service/aibuilder.py rename to databricks/sdk/service/agentbricks.py index 7008a0da9..8cda7ac26 100755 --- a/databricks/sdk/service/aibuilder.py +++ b/databricks/sdk/service/agentbricks.py @@ -23,9 +23,6 @@ class CustomLlm: instructions: str """Instructions for the custom LLM to follow""" - optimization_state: State - """If optimization is kicked off, tracks the state of the custom LLM""" - agent_artifact_path: Optional[str] = None creation_time: Optional[str] = None @@ -45,6 +42,9 @@ class CustomLlm: id: Optional[str] = None + optimization_state: Optional[State] = None + """If optimization is kicked off, tracks the state of the custom LLM""" + def as_dict(self) -> dict: """Serializes the CustomLlm into a dictionary
suitable for use as a JSON request body.""" body = {} @@ -190,7 +190,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Table: ) -class AiBuilderAPI: +class AgentBricksAPI: """The Custom LLMs service manages state and powers the UI for the Custom LLM product.""" def __init__(self, api_client): @@ -270,7 +270,7 @@ def delete_custom_llm(self, id: str): "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/custom-lms/{id}", headers=headers) + self._api.do("DELETE", f"/api/2.0/custom-llms/{id}", headers=headers) def get_custom_llm(self, id: str) -> CustomLlm: """Get a Custom LLM. diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 3fe7b66bb..72b874b8a 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -7723,6 +7723,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SchemaInfo: class SecurableKind(Enum): + """Latest kind: CONNECTION_SQLSERVER_OAUTH_M2M = 254; Next id:255""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index 5d6ea9ce6..eaf2df11a 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -132,15 +132,24 @@ class CleanRoomAccessRestricted(Enum): class CleanRoomAsset: """Metadata of the clean room asset""" - added_at: Optional[int] = None - """When the asset is added to the clean room, in epoch milliseconds.""" + name: str + """A fully qualified name that uniquely identifies the asset within the clean room. This is also + the name displayed in the clean room UI. + + For UC securable assets (tables, volumes, etc.), the format is + *shared_catalog*.*shared_schema*.*asset_name* + + For notebooks, the name is the notebook file name.""" - asset_type: Optional[CleanRoomAssetAssetType] = None + asset_type: CleanRoomAssetAssetType """The type of the asset.""" + added_at: Optional[int] = None + """When the asset is added to the clean room, in epoch milliseconds.""" + clean_room_name: Optional[str] = None - """The name of the clean room this asset belongs to. This is an output-only field to ensure proper - resource identification.""" + """The name of the clean room this asset belongs to. This field is required for create operations + and populated by the server for responses.""" foreign_table: Optional[CleanRoomAssetForeignTable] = None """Foreign table details available to all collaborators of the clean room. Present if and only if @@ -150,15 +159,6 @@ class CleanRoomAsset: """Local details for a foreign that are only available to its owner. Present if and only if **asset_type** is **FOREIGN_TABLE**""" - name: Optional[str] = None - """A fully qualified name that uniquely identifies the asset within the clean room. This is also - the name displayed in the clean room UI. - - For UC securable assets (tables, volumes, etc.), the format is - *shared_catalog*.*shared_schema*.*asset_name* - - For notebooks, the name is the notebook file name.""" - notebook: Optional[CleanRoomAssetNotebook] = None """Notebook details available to all collaborators of the clean room. 
Present if and only if **asset_type** is **NOTEBOOK_FILE**""" @@ -314,7 +314,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetForeignTable: @dataclass class CleanRoomAssetForeignTableLocalDetails: - local_name: Optional[str] = None + local_name: str """The fully qualified name of the foreign table in its owner's local metastore, in the format of *catalog*.*schema*.*foreign_table_name*""" @@ -340,13 +340,13 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetForeignTableLocalDetails: @dataclass class CleanRoomAssetNotebook: - etag: Optional[str] = None - """Server generated etag that represents the notebook version.""" - - notebook_content: Optional[str] = None + notebook_content: str """Base 64 representation of the notebook contents. This is the same format as returned by :method:workspace/export with the format of **HTML**.""" + etag: Optional[str] = None + """Server generated etag that represents the notebook version.""" + review_state: Optional[CleanRoomNotebookReviewNotebookReviewState] = None """top-level status derived from all reviews""" @@ -432,7 +432,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetTable: @dataclass class CleanRoomAssetTableLocalDetails: - local_name: Optional[str] = None + local_name: str """The fully qualified name of the table in its owner's local metastore, in the format of *catalog*.*schema*.*table_name*""" @@ -490,7 +490,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetView: @dataclass class CleanRoomAssetViewLocalDetails: - local_name: Optional[str] = None + local_name: str """The fully qualified name of the view in its owner's local metastore, in the format of *catalog*.*schema*.*view_name*""" @@ -516,7 +516,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAssetViewLocalDetails: @dataclass class CleanRoomAssetVolumeLocalDetails: - local_name: Optional[str] = None + local_name: str """The fully qualified name of the volume in its owner's local metastore, in the format of *catalog*.*schema*.*volume_name*""" @@ -1178,8 +1178,8 @@ def create(self, clean_room_name: str, asset: CleanRoomAsset) -> CleanRoomAsset: access the asset. Typically, you should use a group as the clean room owner. :param clean_room_name: str - The name of the clean room this asset belongs to. This is an output-only field to ensure proper - resource identification. + The name of the clean room this asset belongs to. This field is required for create operations and + populated by the server for responses. :param asset: :class:`CleanRoomAsset` :returns: :class:`CleanRoomAsset` diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index 183d03140..672ff15b2 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -707,6 +707,13 @@ class ProvisioningInfoState(Enum): UPDATING = "UPDATING" +class ProvisioningPhase(Enum): + + PROVISIONING_PHASE_INDEX_SCAN = "PROVISIONING_PHASE_INDEX_SCAN" + PROVISIONING_PHASE_INDEX_SORT = "PROVISIONING_PHASE_INDEX_SORT" + PROVISIONING_PHASE_MAIN = "PROVISIONING_PHASE_MAIN" + + @dataclass class RequestedClaims: permission_set: Optional[RequestedClaimsPermissionSet] = None @@ -960,6 +967,9 @@ class SyncedTablePipelineProgress: """The source table Delta version that was last processed by the pipeline. 
The pipeline may not have completely processed this version yet.""" + provisioning_phase: Optional[ProvisioningPhase] = None + """The current phase of the data synchronization pipeline.""" + sync_progress_completion: Optional[float] = None """The completion ratio of this update. This is a number between 0 and 1.""" @@ -976,6 +986,8 @@ def as_dict(self) -> dict: body["estimated_completion_time_seconds"] = self.estimated_completion_time_seconds if self.latest_version_currently_processing is not None: body["latest_version_currently_processing"] = self.latest_version_currently_processing + if self.provisioning_phase is not None: + body["provisioning_phase"] = self.provisioning_phase.value if self.sync_progress_completion is not None: body["sync_progress_completion"] = self.sync_progress_completion if self.synced_row_count is not None: @@ -991,6 +1003,8 @@ def as_shallow_dict(self) -> dict: body["estimated_completion_time_seconds"] = self.estimated_completion_time_seconds if self.latest_version_currently_processing is not None: body["latest_version_currently_processing"] = self.latest_version_currently_processing + if self.provisioning_phase is not None: + body["provisioning_phase"] = self.provisioning_phase if self.sync_progress_completion is not None: body["sync_progress_completion"] = self.sync_progress_completion if self.synced_row_count is not None: @@ -1005,6 +1019,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SyncedTablePipelineProgress: return cls( estimated_completion_time_seconds=d.get("estimated_completion_time_seconds", None), latest_version_currently_processing=d.get("latest_version_currently_processing", None), + provisioning_phase=_enum(d, "provisioning_phase", ProvisioningPhase), sync_progress_completion=d.get("sync_progress_completion", None), synced_row_count=d.get("synced_row_count", None), total_row_count=d.get("total_row_count", None), @@ -1735,7 +1750,7 @@ def update_database_instance( The name of the instance. This is the unique identifier for the instance. :param database_instance: :class:`DatabaseInstance` :param update_mask: str - The list of fields to update. + The list of fields to update. This field is not yet supported, and is ignored by the server. 
:returns: :class:`DatabaseInstance` """ diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 74c9cdd13..d59dab0a5 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -734,9 +734,11 @@ class IngestionSourceType(Enum): NETSUITE = "NETSUITE" ORACLE = "ORACLE" POSTGRESQL = "POSTGRESQL" + REDSHIFT = "REDSHIFT" SALESFORCE = "SALESFORCE" SERVICENOW = "SERVICENOW" SHAREPOINT = "SHAREPOINT" + SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" TERADATA = "TERADATA" WORKDAY_RAAS = "WORKDAY_RAAS" diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index 2d379c696..975860d8a 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -667,6 +667,7 @@ class ComplianceStandard(Enum): FEDRAMP_HIGH = "FEDRAMP_HIGH" FEDRAMP_IL5 = "FEDRAMP_IL5" FEDRAMP_MODERATE = "FEDRAMP_MODERATE" + GERMANY_C5 = "GERMANY_C5" HIPAA = "HIPAA" HITRUST = "HITRUST" IRAP_PROTECTED = "IRAP_PROTECTED" diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index 3cbb98dc9..a6d69efdf 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -2121,7 +2121,8 @@ class SharedDataObjectUpdate: """One of: **ADD**, **REMOVE**, **UPDATE**.""" data_object: Optional[SharedDataObject] = None - """The data object that is being added, removed, or updated.""" + """The data object that is being added, removed, or updated. The maximum number of update data objects + allowed is 100.""" def as_dict(self) -> dict: """Serializes the SharedDataObjectUpdate into a dictionary suitable for use as a JSON request body.""" diff --git a/docs/account/iam/service_principals.rst b/docs/account/iam/service_principals.rst index 78816845f..6ec4fb814 100644 --- a/docs/account/iam/service_principals.rst +++ b/docs/account/iam/service_principals.rst @@ -23,7 +23,10 @@ a = AccountClient() - spn = a.service_principals.create(display_name=f"sdk-{time.time_ns()}") + sp_create = a.service_principals.create(active=True, display_name=f"sdk-{time.time_ns()}") + + # cleanup + a.service_principals.delete(id=sp_create.id) Creates a new service principal in the Databricks account.
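As a quick illustration of the new `provisioning_phase` field added to `SyncedTablePipelineProgress` in `databricks/sdk/service/database.py` above, the following sketch deserializes an illustrative payload and inspects the phase; the values are made up rather than taken from a real response.

```python
from databricks.sdk.service.database import ProvisioningPhase, SyncedTablePipelineProgress

# Illustrative server payload; field values are invented for the example.
progress = SyncedTablePipelineProgress.from_dict(
    {
        "provisioning_phase": "PROVISIONING_PHASE_INDEX_SCAN",
        "sync_progress_completion": 0.42,
    }
)

if progress.provisioning_phase is ProvisioningPhase.PROVISIONING_PHASE_INDEX_SCAN:
    print(f"index scan is {progress.sync_progress_completion:.0%} complete")
```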
diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..fa9c2ee3e 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -74,9 +74,9 @@ spn_id = spn.id - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - a.workspace_assignment.update( + _ = a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index e0103ea36..acb958c8c 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - creds = a.credentials.create( + role = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=creds.credentials_id) + a.credentials.delete(credentials_id=role.credentials_id) Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 1da53fb45..a72721a6d 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,6 +16,7 @@ .. code-block:: + import os import time from databricks.sdk import AccountClient @@ -23,13 +24,13 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), ) # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) + a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Creates new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace diff --git a/docs/dbdataclasses/agentbricks.rst b/docs/dbdataclasses/agentbricks.rst new file mode 100644 index 000000000..0b6d30000 --- /dev/null +++ b/docs/dbdataclasses/agentbricks.rst @@ -0,0 +1,39 @@ +Agent Bricks +============ + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.agentbricks`` module. + +.. py:currentmodule:: databricks.sdk.service.agentbricks +.. autoclass:: CustomLlm + :members: + :undoc-members: + +.. autoclass:: Dataset + :members: + :undoc-members: + +.. py:class:: State + + States of Custom LLM optimization lifecycle. + + .. py:attribute:: CANCELLED + :value: "CANCELLED" + + .. py:attribute:: COMPLETED + :value: "COMPLETED" + + .. py:attribute:: CREATED + :value: "CREATED" + + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: RUNNING + :value: "RUNNING" + +.. 
autoclass:: Table + :members: + :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 626db6121..fb15f345f 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -1362,6 +1362,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind + Latest kind: CONNECTION_SQLSERVER_OAUTH_M2M = 254; Next id:255 + .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" diff --git a/docs/dbdataclasses/database.rst b/docs/dbdataclasses/database.rst index bc7607faf..534c9b0d0 100644 --- a/docs/dbdataclasses/database.rst +++ b/docs/dbdataclasses/database.rst @@ -109,6 +109,17 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UPDATING :value: "UPDATING" +.. py:class:: ProvisioningPhase + + .. py:attribute:: PROVISIONING_PHASE_INDEX_SCAN + :value: "PROVISIONING_PHASE_INDEX_SCAN" + + .. py:attribute:: PROVISIONING_PHASE_INDEX_SORT + :value: "PROVISIONING_PHASE_INDEX_SORT" + + .. py:attribute:: PROVISIONING_PHASE_MAIN + :value: "PROVISIONING_PHASE_MAIN" + .. autoclass:: RequestedClaims :members: :undoc-members: diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index ca9fd5a4d..4b769f812 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -5,7 +5,7 @@ Dataclasses .. toctree:: :maxdepth: 1 - aibuilder + agentbricks apps billing catalog diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 436967f5c..3b017a455 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -158,6 +158,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" + .. py:attribute:: REDSHIFT + :value: "REDSHIFT" + .. py:attribute:: SALESFORCE :value: "SALESFORCE" @@ -167,6 +170,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SHAREPOINT :value: "SHAREPOINT" + .. py:attribute:: SQLDW + :value: "SQLDW" + .. py:attribute:: SQLSERVER :value: "SQLSERVER" diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 59383f091..e09f827f6 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -140,6 +140,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FEDRAMP_MODERATE :value: "FEDRAMP_MODERATE" + .. py:attribute:: GERMANY_C5 + :value: "GERMANY_C5" + .. py:attribute:: HIPAA :value: "HIPAA" diff --git a/docs/gen-client-docs.py b/docs/gen-client-docs.py index 66235d26c..a48d1a7ab 100644 --- a/docs/gen-client-docs.py +++ b/docs/gen-client-docs.py @@ -260,9 +260,9 @@ class Generator: "Create Database Instances and manage their configurations, including integrations with Unity Catalog" ), Package( - "aibuilder", - "AI Builder", - "Create and manage AI Builder resources." + "agentbricks", + "Agent Bricks", + "Create and manage Agent Bricks resources." ) ] diff --git a/docs/workspace/agentbricks/agent_bricks.rst b/docs/workspace/agentbricks/agent_bricks.rst new file mode 100644 index 000000000..ca017c49a --- /dev/null +++ b/docs/workspace/agentbricks/agent_bricks.rst @@ -0,0 +1,92 @@ +``w.agent_bricks``: Agent Bricks Service +======================================== +.. currentmodule:: databricks.sdk.service.agentbricks + +.. 
py:class:: AgentBricksAPI + + The Custom LLMs service manages state and powers the UI for the Custom LLM product. + + .. py:method:: cancel_optimize(id: str) + + Cancel a Custom LLM Optimization Run. + + :param id: str + + + + + .. py:method:: create_custom_llm(name: str, instructions: str [, agent_artifact_path: Optional[str], datasets: Optional[List[Dataset]], guidelines: Optional[List[str]]]) -> CustomLlm + + Create a Custom LLM. + + :param name: str + Name of the custom LLM. Only alphanumeric characters and dashes allowed. + :param instructions: str + Instructions for the custom LLM to follow + :param agent_artifact_path: str (optional) + Optional: UC path for agent artifacts. If you are using a dataset that you only have read + permissions, please provide a destination path where you have write permissions. Please provide this + in catalog.schema format. + :param datasets: List[:class:`Dataset`] (optional) + Datasets used for training and evaluating the model, not for inference. Currently, only 1 dataset is + accepted. + :param guidelines: List[str] (optional) + Guidelines for the custom LLM to adhere to + + :returns: :class:`CustomLlm` + + + .. py:method:: delete_custom_llm(id: str) + + Delete a Custom LLM. + + :param id: str + The id of the custom llm + + + + + .. py:method:: get_custom_llm(id: str) -> CustomLlm + + Get a Custom LLM. + + :param id: str + The id of the custom llm + + :returns: :class:`CustomLlm` + + + .. py:method:: start_optimize(id: str) -> CustomLlm + + Start a Custom LLM Optimization Run. + + :param id: str + The Id of the tile. + + :returns: :class:`CustomLlm` + + + .. py:method:: update_custom_llm(id: str, custom_llm: CustomLlm, update_mask: str) -> CustomLlm + + Update a Custom LLM. + + :param id: str + The id of the custom llm + :param custom_llm: :class:`CustomLlm` + The CustomLlm containing the fields which should be updated. + :param update_mask: str + The list of the CustomLlm fields to update. These should correspond to the values (or lack thereof) + present in `custom_llm`. + + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`CustomLlm` + \ No newline at end of file diff --git a/docs/workspace/agentbricks/index.rst b/docs/workspace/agentbricks/index.rst new file mode 100644 index 000000000..555aed7b0 --- /dev/null +++ b/docs/workspace/agentbricks/index.rst @@ -0,0 +1,10 @@ + +Agent Bricks +============ + +Create and manage Agent Bricks resources. + +.. 
toctree:: + :maxdepth: 1 + + agent_bricks \ No newline at end of file diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index c486ab0d1..9a18ede8a 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + created = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) + w.catalogs.delete(name=created.name, force=True) Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index e7c1fd75e..668d4726b 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -30,20 +30,22 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + storage_credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + comment="created via SDK", ) - created = w.external_locations.create( + external_location = w.external_locations.create( name=f"sdk-{time.time_ns()}", - credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + credential_name=storage_credential.name, + comment="created via SDK", + url="s3://" + os.environ["TEST_BUCKET"] + "/" + f"sdk-{time.time_ns()}", ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(name=storage_credential.name) + w.external_locations.delete(name=external_location.name) Creates a new external location entry in the metastore. The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage @@ -179,24 +181,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. 
In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 2d4dc160c..194069200 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(name=created.name) Creates a new storage credential. @@ -123,11 +123,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) + all = w.storage_credentials.list() Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore @@ -163,17 +162,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Updates a storage credential on the metastore. diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index efeea33f6..0ecc0774c 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -117,7 +117,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/cleanrooms/clean_room_assets.rst b/docs/workspace/cleanrooms/clean_room_assets.rst index fa5772a76..ef32dca12 100644 --- a/docs/workspace/cleanrooms/clean_room_assets.rst +++ b/docs/workspace/cleanrooms/clean_room_assets.rst @@ -15,8 +15,8 @@ access the asset. Typically, you should use a group as the clean room owner. :param clean_room_name: str - The name of the clean room this asset belongs to. This is an output-only field to ensure proper - resource identification. + The name of the clean room this asset belongs to. This field is required for create operations and + populated by the server for responses. :param asset: :class:`CleanRoomAsset` :returns: :class:`CleanRoomAsset` diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index db78626ff..d46b8ecd0 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -647,10 +647,11 @@ .. 
code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import compute w = WorkspaceClient() - nodes = w.clusters.list_node_types() + all = w.clusters.list(compute.ListClustersRequest()) Return information about all pinned and active clusters, and all clusters terminated within the last 30 days. Clusters terminated prior to this period are not included. diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst index 57c7b737f..dca0dfa6a 100644 --- a/docs/workspace/database/database.rst +++ b/docs/workspace/database/database.rst @@ -218,7 +218,7 @@ The name of the instance. This is the unique identifier for the instance. :param database_instance: :class:`DatabaseInstance` :param update_mask: str - The list of fields to update. + The list of fields to update. This field is not yet supported, and is ignored by the server. :returns: :class:`DatabaseInstance` \ No newline at end of file diff --git a/docs/workspace/iam/groups.rst b/docs/workspace/iam/groups.rst index 764a81ab9..737939095 100644 --- a/docs/workspace/iam/groups.rst +++ b/docs/workspace/iam/groups.rst @@ -69,9 +69,6 @@ group = w.groups.create(display_name=f"sdk-{time.time_ns()}") w.groups.delete(id=group.id) - - # cleanup - w.groups.delete(id=group.id) Deletes a group from the Databricks workspace. diff --git a/docs/workspace/iam/service_principals.rst b/docs/workspace/iam/service_principals.rst index ce8978afb..0d0d447b9 100644 --- a/docs/workspace/iam/service_principals.rst +++ b/docs/workspace/iam/service_principals.rst @@ -20,19 +20,13 @@ import time from databricks.sdk import WorkspaceClient - from databricks.sdk.service import iam w = WorkspaceClient() - groups = w.groups.group_display_name_to_id_map(iam.ListGroupsRequest()) - - spn = w.service_principals.create( - display_name=f"sdk-{time.time_ns()}", - groups=[iam.ComplexValue(value=groups["admins"])], - ) + created = w.service_principals.create(display_name=f"sdk-{time.time_ns()}") # cleanup - w.service_principals.delete(id=spn.id) + w.service_principals.delete(id=created.id) Creates a new service principal in the Databricks workspace. diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index fbee85661..04188cc1f 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -78,9 +78,12 @@ w = WorkspaceClient() - other_owner = w.users.create(user_name=f"sdk-{time.time_ns()}@example.com") + user = w.users.create( + display_name=f"sdk-{time.time_ns()}", + user_name=f"sdk-{time.time_ns()}@example.com", + ) - w.users.delete(id=other_owner.id) + w.users.delete(id=user.id) Deletes a user. Deleting a user from a Databricks workspace also removes objects associated with the user. diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index 16ff69dc8..bc5ca6afb 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -7,7 +7,7 @@ These APIs are available from WorkspaceClient .. 
toctree:: :maxdepth: 1 - aibuilder/index + agentbricks/index apps/index catalog/index cleanrooms/index diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index d4fdba07f..d68e92a5c 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -353,23 +353,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 98d803a63..e416ac56b 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,7 +90,9 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + + mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -120,7 +122,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a model version. diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 1a7c88de9..fd81e1b24 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -101,25 +101,12 @@ .. code-block:: - import time - from databricks.sdk import WorkspaceClient + from databricks.sdk.service import sharing w = WorkspaceClient() - public_share_recipient = """{ - "shareCredentialsVersion":1, - "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", - "endpoint":"https://sharing.delta.io/delta-sharing/" - } - """ - - created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) - - shares = w.providers.list_shares(name=created.name) - - # cleanup - w.providers.delete(name=created.name) + all = w.providers.list(sharing.ListProvidersRequest()) Gets an array of available authentication providers. The caller must either be a metastore admin or the owner of the providers. Providers not owned by the caller are not included in the response. There diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 03dae035c..2c369968e 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -178,7 +178,7 @@ content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), format=workspace.ImportFormat.SOURCE, language=workspace.Language.SQL, - overwrite=true_, + overwrite=True, path=notebook_path, ) @@ -223,14 +223,16 @@ .. 
code-block:: + import os + import time + from databricks.sdk import WorkspaceClient w = WorkspaceClient() - names = [] - for i in w.workspace.list(f"/Users/{w.current_user.me().user_name}", recursive=True): - names.append(i.path) - assert len(names) > 0 + notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + + objects = w.workspace.list(path=os.path.dirname(notebook)) List workspace objects
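
For code that consumed the removed `aibuilder` package, a hedged migration sketch against the renamed `agent_bricks` accessor; the custom LLM name and instructions are placeholders.

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Previously: w.ai_builder.create_custom_llm(...)
llm = w.agent_bricks.create_custom_llm(
    name="sdk-docs-example",  # placeholder; only alphanumeric characters and dashes are allowed
    instructions="Answer questions about internal style guidelines.",
)

# optimization_state is now Optional[State] and may be None until an optimization run starts.
print(llm.id, llm.optimization_state)

# cleanup
w.agent_bricks.delete_custom_llm(id=llm.id)
```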