diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4d77e7183..a68714d08 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7de38b0552c78117c01aab884acd9b899a9f4d7f \ No newline at end of file +3a7fe4deb693ca98d89b044116aaf008efd895a5 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index a5721cff8..6d11dd310 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,14 @@ ### Internal Changes ### API Changes +* Added `statement_id_signature` field for `databricks.sdk.service.dashboards.Result`. +* Added `effective_database_instance_name` and `effective_logical_database_name` fields for `databricks.sdk.service.database.SyncedDatabaseTable`. +* Added `table` field for `databricks.sdk.service.jobs.TriggerStateProto`. +* Added `email_notifications` field for `databricks.sdk.service.serving.CreatePtEndpointRequest`. +* Added `email_notifications` field for `databricks.sdk.service.serving.CreateServingEndpoint`. +* Added `email_notifications` field for `databricks.sdk.service.serving.ServingEndpointDetailed`. +* [Breaking] Changed `list()` method for [w.consumer_providers](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/marketplace/consumer_providers.html) workspace-level service . New request type is `databricks.sdk.service.marketplace.ListConsumerProvidersRequest` dataclass. +* [Breaking] Changed `create()` method for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service with new required argument order. +* [Breaking] Changed `create()` method for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service . New request type is `databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` dataclass. +* [Breaking] Changed `replace()` method for [a.private_access](https://databricks-sdk-py.readthedocs.io/en/latest/account/provisioning/private_access.html) account-level service . New request type is `databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest` dataclass. +* [Breaking] Removed `is_featured` field for `databricks.sdk.service.marketplace.ListProvidersRequest`. \ No newline at end of file diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index 22caa3809..797b353de 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -32,6 +32,8 @@ class App: app_status: Optional[ApplicationStatus] = None budget_policy_id: Optional[str] = None + """TODO: Deprecate this field after serverless entitlements are released to all prod stages and the + new usage_policy_id is properly populated and used.""" compute_status: Optional[ComputeStatus] = None @@ -49,6 +51,8 @@ class App: """The description of the app.""" effective_budget_policy_id: Optional[str] = None + """TODO: Deprecate this field after serverless entitlements are released to all prod stages and the + new usage_policy_id is properly populated and used.""" effective_user_api_scopes: Optional[List[str]] = None """The effective api scopes granted to the user access token.""" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 8861916d9..9917705da 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -1148,6 +1148,9 @@ class Result: """Statement Execution API statement id. 
Use [Get status, manifest, and result first chunk](:method:statementexecution/getstatement) to get the full result data.""" + statement_id_signature: Optional[str] = None + """JWT corresponding to the statement contained in this result.""" + def as_dict(self) -> dict: """Serializes the Result into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1157,6 +1160,8 @@ def as_dict(self) -> dict: body["row_count"] = self.row_count if self.statement_id is not None: body["statement_id"] = self.statement_id + if self.statement_id_signature is not None: + body["statement_id_signature"] = self.statement_id_signature return body def as_shallow_dict(self) -> dict: @@ -1168,6 +1173,8 @@ def as_shallow_dict(self) -> dict: body["row_count"] = self.row_count if self.statement_id is not None: body["statement_id"] = self.statement_id + if self.statement_id_signature is not None: + body["statement_id_signature"] = self.statement_id_signature return body @classmethod @@ -1177,6 +1184,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Result: return cls( is_truncated=d.get("is_truncated", None), row_count=d.get("row_count", None), statement_id=d.get("statement_id", None), + statement_id_signature=d.get("statement_id_signature", None), ) diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index 25ddf5a0e..20c2f37f5 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -790,7 +790,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RequestedResource: @dataclass class SyncedDatabaseTable: - """Next field marker: 12""" + """Next field marker: 14""" name: str """Full three-part (catalog, schema, table) name of the table.""" @@ -805,6 +805,14 @@ class SyncedDatabaseTable: database instance name MUST match that of the registered catalog (or the request will be rejected).""" + effective_database_instance_name: Optional[str] = None + """The name of the database instance that this table is registered to. This field is always + returned; for tables inside database catalogs it is inferred from the database instance associated + with the catalog.""" + + effective_logical_database_name: Optional[str] = None + """The name of the logical database that this table is registered to.""" + logical_database_name: Optional[str] = None """Target Postgres database object (logical database) name for this table. 
@@ -831,6 +839,10 @@ def as_dict(self) -> dict: body["data_synchronization_status"] = self.data_synchronization_status.as_dict() if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name + if self.effective_database_instance_name is not None: + body["effective_database_instance_name"] = self.effective_database_instance_name + if self.effective_logical_database_name is not None: + body["effective_logical_database_name"] = self.effective_logical_database_name if self.logical_database_name is not None: body["logical_database_name"] = self.logical_database_name if self.name is not None: @@ -848,6 +860,10 @@ def as_shallow_dict(self) -> dict: body["data_synchronization_status"] = self.data_synchronization_status if self.database_instance_name is not None: body["database_instance_name"] = self.database_instance_name + if self.effective_database_instance_name is not None: + body["effective_database_instance_name"] = self.effective_database_instance_name + if self.effective_logical_database_name is not None: + body["effective_logical_database_name"] = self.effective_logical_database_name if self.logical_database_name is not None: body["logical_database_name"] = self.logical_database_name if self.name is not None: @@ -864,6 +880,8 @@ def from_dict(cls, d: Dict[str, Any]) -> SyncedDatabaseTable: return cls( data_synchronization_status=_from_dict(d, "data_synchronization_status", SyncedTableStatus), database_instance_name=d.get("database_instance_name", None), + effective_database_instance_name=d.get("effective_database_instance_name", None), + effective_logical_database_name=d.get("effective_logical_database_name", None), logical_database_name=d.get("logical_database_name", None), name=d.get("name", None), spec=_from_dict(d, "spec", SyncedTableSpec), diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 50c7ddb83..d7c16b3c8 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -7223,6 +7223,73 @@ def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriber: return cls(destination_id=d.get("destination_id", None), user_name=d.get("user_name", None)) +@dataclass +class TableState: + has_seen_updates: Optional[bool] = None + """Whether or not the table has seen updates since either the creation of the trigger or the last + successful evaluation of the trigger""" + + table_name: Optional[str] = None + """Full table name of the table to monitor, e.g. 
`mycatalog.myschema.mytable`""" + + def as_dict(self) -> dict: + """Serializes the TableState into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.has_seen_updates is not None: + body["has_seen_updates"] = self.has_seen_updates + if self.table_name is not None: + body["table_name"] = self.table_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TableState into a shallow dictionary of its immediate attributes.""" + body = {} + if self.has_seen_updates is not None: + body["has_seen_updates"] = self.has_seen_updates + if self.table_name is not None: + body["table_name"] = self.table_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TableState: + """Deserializes the TableState from a dictionary.""" + return cls(has_seen_updates=d.get("has_seen_updates", None), table_name=d.get("table_name", None)) + + +@dataclass +class TableTriggerState: + last_seen_table_states: Optional[List[TableState]] = None + + using_scalable_monitoring: Optional[bool] = None + """Indicates whether the trigger is using scalable monitoring.""" + + def as_dict(self) -> dict: + """Serializes the TableTriggerState into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.last_seen_table_states: + body["last_seen_table_states"] = [v.as_dict() for v in self.last_seen_table_states] + if self.using_scalable_monitoring is not None: + body["using_scalable_monitoring"] = self.using_scalable_monitoring + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TableTriggerState into a shallow dictionary of its immediate attributes.""" + body = {} + if self.last_seen_table_states: + body["last_seen_table_states"] = self.last_seen_table_states + if self.using_scalable_monitoring is not None: + body["using_scalable_monitoring"] = self.using_scalable_monitoring + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TableTriggerState: + """Deserializes the TableTriggerState from a dictionary.""" + return cls( + last_seen_table_states=_repeated_dict(d, "last_seen_table_states", TableState), + using_scalable_monitoring=d.get("using_scalable_monitoring", None), + ) + + @dataclass class TableUpdateTriggerConfiguration: condition: Optional[Condition] = None @@ -7993,11 +8060,15 @@ def from_dict(cls, d: Dict[str, Any]) -> TriggerSettings: class TriggerStateProto: file_arrival: Optional[FileArrivalTriggerState] = None + table: Optional[TableTriggerState] = None + def as_dict(self) -> dict: """Serializes the TriggerStateProto into a dictionary suitable for use as a JSON request body.""" body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival.as_dict() + if self.table: + body["table"] = self.table.as_dict() return body def as_shallow_dict(self) -> dict: @@ -8005,12 +8076,17 @@ def as_shallow_dict(self) -> dict: body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival + if self.table: + body["table"] = self.table return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> TriggerStateProto: """Deserializes the TriggerStateProto from a dictionary.""" - return cls(file_arrival=_from_dict(d, "file_arrival", FileArrivalTriggerState)) + return cls( + file_arrival=_from_dict(d, "file_arrival", FileArrivalTriggerState), + table=_from_dict(d, "table", TableTriggerState), + ) class TriggerType(Enum): diff --git a/databricks/sdk/service/serving.py b/databricks/sdk/service/serving.py index f6fb4ccf2..b74651abd 100755 --- a/databricks/sdk/service/serving.py +++ 
b/databricks/sdk/service/serving.py @@ -990,10 +990,13 @@ def from_dict(cls, d: Dict[str, Any]) -> DatabricksModelServingConfig: @dataclass class DataframeSplitInput: columns: Optional[List[Any]] = None + """Columns array for the dataframe""" data: Optional[List[Any]] = None + """Data array for the dataframe""" index: Optional[List[int]] = None + """Index array for the dataframe""" def as_dict(self) -> dict: """Serializes the DataframeSplitInput into a dictionary suitable for use as a JSON request body.""" @@ -1041,9 +1044,46 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse: return cls() +@dataclass +class EmailNotifications: + on_update_failure: Optional[List[str]] = None + """A list of email addresses to be notified when an endpoint fails to update its configuration or + state.""" + + on_update_success: Optional[List[str]] = None + """A list of email addresses to be notified when an endpoint successfully updates its configuration + or state.""" + + def as_dict(self) -> dict: + """Serializes the EmailNotifications into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.on_update_failure: + body["on_update_failure"] = [v for v in self.on_update_failure] + if self.on_update_success: + body["on_update_success"] = [v for v in self.on_update_success] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the EmailNotifications into a shallow dictionary of its immediate attributes.""" + body = {} + if self.on_update_failure: + body["on_update_failure"] = self.on_update_failure + if self.on_update_success: + body["on_update_success"] = self.on_update_success + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> EmailNotifications: + """Deserializes the EmailNotifications from a dictionary.""" + return cls( + on_update_failure=d.get("on_update_failure", None), on_update_success=d.get("on_update_success", None) + ) + + @dataclass class EmbeddingsV1ResponseEmbeddingElement: embedding: Optional[List[float]] = None + """The embedding vector""" index: Optional[int] = None """The index of the embedding in the response.""" @@ -3262,11 +3302,11 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedModelState: class ServedModelStateDeployment(Enum): - ABORTED = "DEPLOYMENT_ABORTED" - CREATING = "DEPLOYMENT_CREATING" - FAILED = "DEPLOYMENT_FAILED" - READY = "DEPLOYMENT_READY" - RECOVERING = "DEPLOYMENT_RECOVERING" + DEPLOYMENT_ABORTED = "DEPLOYMENT_ABORTED" + DEPLOYMENT_CREATING = "DEPLOYMENT_CREATING" + DEPLOYMENT_FAILED = "DEPLOYMENT_FAILED" + DEPLOYMENT_READY = "DEPLOYMENT_READY" + DEPLOYMENT_RECOVERING = "DEPLOYMENT_RECOVERING" @dataclass @@ -3545,6 +3585,9 @@ class ServingEndpointDetailed: description: Optional[str] = None """Description of the serving model""" + email_notifications: Optional[EmailNotifications] = None + """Email notification settings.""" + endpoint_url: Optional[str] = None """Endpoint invocation url if route optimization is enabled for endpoint""" @@ -3593,6 +3636,8 @@ def as_dict(self) -> dict: body["data_plane_info"] = self.data_plane_info.as_dict() if self.description is not None: body["description"] = self.description + if self.email_notifications: + body["email_notifications"] = self.email_notifications.as_dict() if self.endpoint_url is not None: body["endpoint_url"] = self.endpoint_url if self.id is not None: @@ -3632,6 +3677,8 @@ def as_shallow_dict(self) -> dict: body["data_plane_info"] = self.data_plane_info if self.description is not None: body["description"] = self.description + if 
self.email_notifications: + body["email_notifications"] = self.email_notifications if self.endpoint_url is not None: body["endpoint_url"] = self.endpoint_url if self.id is not None: @@ -3665,6 +3712,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ServingEndpointDetailed: creator=d.get("creator", None), data_plane_info=_from_dict(d, "data_plane_info", ModelDataPlaneInfo), description=d.get("description", None), + email_notifications=_from_dict(d, "email_notifications", EmailNotifications), endpoint_url=d.get("endpoint_url", None), id=d.get("id", None), last_updated_timestamp=d.get("last_updated_timestamp", None), @@ -3978,6 +4026,7 @@ def create( budget_policy_id: Optional[str] = None, config: Optional[EndpointCoreConfigInput] = None, description: Optional[str] = None, + email_notifications: Optional[EmailNotifications] = None, rate_limits: Optional[List[RateLimit]] = None, route_optimized: Optional[bool] = None, tags: Optional[List[EndpointTag]] = None, @@ -3996,6 +4045,8 @@ def create( :param config: :class:`EndpointCoreConfigInput` (optional) The core config of the serving endpoint. :param description: str (optional) + :param email_notifications: :class:`EmailNotifications` (optional) + Email notification settings. :param rate_limits: List[:class:`RateLimit`] (optional) Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits. @@ -4017,6 +4068,8 @@ def create( body["config"] = config.as_dict() if description is not None: body["description"] = description + if email_notifications is not None: + body["email_notifications"] = email_notifications.as_dict() if name is not None: body["name"] = name if rate_limits is not None: @@ -4045,6 +4098,7 @@ def create_and_wait( budget_policy_id: Optional[str] = None, config: Optional[EndpointCoreConfigInput] = None, description: Optional[str] = None, + email_notifications: Optional[EmailNotifications] = None, rate_limits: Optional[List[RateLimit]] = None, route_optimized: Optional[bool] = None, tags: Optional[List[EndpointTag]] = None, @@ -4055,6 +4109,7 @@ def create_and_wait( budget_policy_id=budget_policy_id, config=config, description=description, + email_notifications=email_notifications, name=name, rate_limits=rate_limits, route_optimized=route_optimized, @@ -4068,6 +4123,7 @@ def create_provisioned_throughput_endpoint( *, ai_gateway: Optional[AiGatewayConfig] = None, budget_policy_id: Optional[str] = None, + email_notifications: Optional[EmailNotifications] = None, tags: Optional[List[EndpointTag]] = None, ) -> Wait[ServingEndpointDetailed]: """Create a new PT serving endpoint. @@ -4081,6 +4137,8 @@ def create_provisioned_throughput_endpoint( The AI Gateway configuration for the serving endpoint. :param budget_policy_id: str (optional) The budget policy associated with the endpoint. + :param email_notifications: :class:`EmailNotifications` (optional) + Email notification settings. :param tags: List[:class:`EndpointTag`] (optional) Tags to be attached to the serving endpoint and automatically propagated to billing logs. 
@@ -4095,6 +4153,8 @@ def create_provisioned_throughput_endpoint( body["budget_policy_id"] = budget_policy_id if config is not None: body["config"] = config.as_dict() + if email_notifications is not None: + body["email_notifications"] = email_notifications.as_dict() if name is not None: body["name"] = name if tags is not None: @@ -4118,11 +4178,17 @@ def create_provisioned_throughput_endpoint_and_wait( *, ai_gateway: Optional[AiGatewayConfig] = None, budget_policy_id: Optional[str] = None, + email_notifications: Optional[EmailNotifications] = None, tags: Optional[List[EndpointTag]] = None, timeout=timedelta(minutes=20), ) -> ServingEndpointDetailed: return self.create_provisioned_throughput_endpoint( - ai_gateway=ai_gateway, budget_policy_id=budget_policy_id, config=config, name=name, tags=tags + ai_gateway=ai_gateway, + budget_policy_id=budget_policy_id, + config=config, + email_notifications=email_notifications, + name=name, + tags=tags, ).result(timeout=timeout) def delete(self, name: str): @@ -4422,10 +4488,10 @@ def query( stream: Optional[bool] = None, temperature: Optional[float] = None, ) -> QueryEndpointResponse: - """Query a serving endpoint. + """Query a serving endpoint :param name: str - The name of the serving endpoint. This field is required. + The name of the serving endpoint. This field is required and is provided via the path parameter. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -4446,8 +4512,8 @@ def query( The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer and should only be used with other chat/completions query fields. :param messages: List[:class:`ChatMessage`] (optional) - The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is a - map of strings and should only be used with other chat query fields. + The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is an + array of ChatMessage objects and should only be used with other chat query fields. :param n: int (optional) The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer between 1 and 5 with a default of 1 and should only be @@ -4725,10 +4791,10 @@ def query( stream: Optional[bool] = None, temperature: Optional[float] = None, ) -> QueryEndpointResponse: - """Query a serving endpoint. + """Query a serving endpoint :param name: str - The name of the serving endpoint. This field is required. + The name of the serving endpoint. This field is required and is provided via the path parameter. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -4749,8 +4815,8 @@ def query( The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer and should only be used with other chat/completions query fields. :param messages: List[:class:`ChatMessage`] (optional) - The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is a - map of strings and should only be used with other chat query fields. + The messages field used ONLY for __chat external & foundation model__ serving endpoints. 
This is an + array of ChatMessage objects and should only be used with other chat query fields. :param n: int (optional) The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer between 1 and 5 with a default of 1 and should only be diff --git a/docs/account/iam/service_principals.rst b/docs/account/iam/service_principals.rst index 78816845f..6ec4fb814 100644 --- a/docs/account/iam/service_principals.rst +++ b/docs/account/iam/service_principals.rst @@ -23,7 +23,10 @@ a = AccountClient() - spn = a.service_principals.create(display_name=f"sdk-{time.time_ns()}") + sp_create = a.service_principals.create(active=True, display_name=f"sdk-{time.time_ns()}") + + # cleanup + a.service_principals.delete(id=sp_create.id) Creates a new service principal in the Databricks account. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..133b16f3d 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - all = a.workspace_assignment.list(workspace_id=workspace_id) + all = a.workspace_assignment.list(list=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. @@ -74,9 +74,9 @@ spn_id = spn.id - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - a.workspace_assignment.update( + _ = a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index e0103ea36..acb958c8c 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - creds = a.credentials.create( + role = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=creds.credentials_id) + a.credentials.delete(credentials_id=role.credentials_id) Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 1da53fb45..a72721a6d 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,6 +16,7 @@ .. 
code-block:: + import os import time from databricks.sdk import AccountClient @@ -23,13 +24,13 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), ) # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) + a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Creates new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 91038c684..644497acd 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -948,6 +948,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: TableState + :members: + :undoc-members: + +.. autoclass:: TableTriggerState + :members: + :undoc-members: + .. autoclass:: TableUpdateTriggerConfiguration :members: :undoc-members: diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 852991cfe..39b159395 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -153,6 +153,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: EmailNotifications + :members: + :undoc-members: + .. autoclass:: EmbeddingsV1ResponseEmbeddingElement :members: :undoc-members: @@ -419,20 +423,20 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ServedModelStateDeployment - .. py:attribute:: ABORTED - :value: "ABORTED" + .. py:attribute:: DEPLOYMENT_ABORTED + :value: "DEPLOYMENT_ABORTED" - .. py:attribute:: CREATING - :value: "CREATING" + .. py:attribute:: DEPLOYMENT_CREATING + :value: "DEPLOYMENT_CREATING" - .. py:attribute:: FAILED - :value: "FAILED" + .. py:attribute:: DEPLOYMENT_FAILED + :value: "DEPLOYMENT_FAILED" - .. py:attribute:: READY - :value: "READY" + .. py:attribute:: DEPLOYMENT_READY + :value: "DEPLOYMENT_READY" - .. py:attribute:: RECOVERING - :value: "RECOVERING" + .. py:attribute:: DEPLOYMENT_RECOVERING + :value: "DEPLOYMENT_RECOVERING" .. autoclass:: ServerLogsResponse :members: diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index c486ab0d1..9a18ede8a 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + created = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) + w.catalogs.delete(name=created.name, force=True) Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. 
diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index e7c1fd75e..624fe1958 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -30,20 +30,22 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + storage_credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + comment="created via SDK", ) - created = w.external_locations.create( + external_location = w.external_locations.create( name=f"sdk-{time.time_ns()}", - credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + credential_name=storage_credential.name, + comment="created via SDK", + url="s3://" + os.environ["TEST_BUCKET"] + "/" + f"sdk-{time.time_ns()}", ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(name=storage_credential.name) + w.external_locations.delete(name=external_location.name) Creates a new external location entry in the metastore. The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage @@ -104,20 +106,20 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) - _ = w.external_locations.get(name=created.name) + _ = w.external_locations.get(get=created.name) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. @@ -139,10 +141,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list() + all = w.external_locations.list(catalog.ListExternalLocationsRequest()) Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. 
The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index 2d4dc160c..5fe0bb70f 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(delete=created.name) Creates a new storage credential. @@ -98,13 +98,13 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) - by_name = w.storage_credentials.get(get=created.name) + by_name = w.storage_credentials.get(name=created.name) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential. @@ -123,11 +123,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) + all = w.storage_credentials.list() Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index db78626ff..d46b8ecd0 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -647,10 +647,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import compute w = WorkspaceClient() - nodes = w.clusters.list_node_types() + all = w.clusters.list(compute.ListClustersRequest()) Return information about all pinned and active clusters, and all clusters terminated within the last 30 days. Clusters terminated prior to this period are not included. diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index b2390ce63..2f95213e2 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me = w.current_user.me() + me2 = w.current_user.me() Get details about the current method caller's identity. diff --git a/docs/workspace/iam/groups.rst b/docs/workspace/iam/groups.rst index 764a81ab9..737939095 100644 --- a/docs/workspace/iam/groups.rst +++ b/docs/workspace/iam/groups.rst @@ -69,9 +69,6 @@ group = w.groups.create(display_name=f"sdk-{time.time_ns()}") w.groups.delete(id=group.id) - - # cleanup - w.groups.delete(id=group.id) Deletes a group from the Databricks workspace. 
diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index 3933a1577..dae53fa2e 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -44,7 +44,7 @@ obj = w.workspace.get_status(path=notebook_path) - _ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) + levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) Gets the permissions of an object. Objects can inherit permissions from their parent objects or root object. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index d4fdba07f..d68e92a5c 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -353,23 +353,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 98d803a63..2d34256e4 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,7 +90,7 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -120,7 +120,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a model version. @@ -734,13 +734,14 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - model = w.model_registry.get_model(name=created.registered_model.name) + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") - w.model_registry.update_model( - name=model.registered_model_databricks.name, + w.model_registry.update_model_version( description=f"sdk-{time.time_ns()}", + name=created.model_version.name, + version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index d29ca521e..3939b44e0 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -27,7 +27,7 @@ :returns: :class:`BuildLogsResponse` - .. 
py:method:: create(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], description: Optional[str], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] + .. py:method:: create(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], description: Optional[str], email_notifications: Optional[EmailNotifications], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new serving endpoint. @@ -43,6 +43,8 @@ :param config: :class:`EndpointCoreConfigInput` (optional) The core config of the serving endpoint. :param description: str (optional) + :param email_notifications: :class:`EmailNotifications` (optional) + Email notification settings. :param rate_limits: List[:class:`RateLimit`] (optional) Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI Gateway to manage rate limits. @@ -56,10 +58,10 @@ See :method:wait_get_serving_endpoint_not_updating for more details. - .. py:method:: create_and_wait(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], description: Optional[str], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. py:method:: create_and_wait(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], description: Optional[str], email_notifications: Optional[EmailNotifications], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed - .. py:method:: create_provisioned_throughput_endpoint(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] + .. py:method:: create_provisioned_throughput_endpoint(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], email_notifications: Optional[EmailNotifications], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new PT serving endpoint. @@ -72,6 +74,8 @@ The AI Gateway configuration for the serving endpoint. :param budget_policy_id: str (optional) The budget policy associated with the endpoint. + :param email_notifications: :class:`EmailNotifications` (optional) + Email notification settings. :param tags: List[:class:`EndpointTag`] (optional) Tags to be attached to the serving endpoint and automatically propagated to billing logs. @@ -80,7 +84,7 @@ See :method:wait_get_serving_endpoint_not_updating for more details. - .. py:method:: create_provisioned_throughput_endpoint_and_wait(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. 
py:method:: create_provisioned_throughput_endpoint_and_wait(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], email_notifications: Optional[EmailNotifications], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed .. py:method:: delete(name: str) @@ -243,10 +247,10 @@ .. py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse - Query a serving endpoint. + Query a serving endpoint :param name: str - The name of the serving endpoint. This field is required. + The name of the serving endpoint. This field is required and is provided via the path parameter. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -267,8 +271,8 @@ The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer and should only be used with other chat/completions query fields. :param messages: List[:class:`ChatMessage`] (optional) - The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is a - map of strings and should only be used with other chat query fields. + The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is an + array of ChatMessage objects and should only be used with other chat query fields. :param n: int (optional) The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer between 1 and 5 with a default of 1 and should only be diff --git a/docs/workspace/serving/serving_endpoints_data_plane.rst b/docs/workspace/serving/serving_endpoints_data_plane.rst index bb22c3dd7..9177cb295 100644 --- a/docs/workspace/serving/serving_endpoints_data_plane.rst +++ b/docs/workspace/serving/serving_endpoints_data_plane.rst @@ -9,10 +9,10 @@ .. py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse - Query a serving endpoint. + Query a serving endpoint :param name: str - The name of the serving endpoint. This field is required. + The name of the serving endpoint. This field is required and is provided via the path parameter. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -33,8 +33,8 @@ The max tokens field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer and should only be used with other chat/completions query fields. 
:param messages: List[:class:`ChatMessage`] (optional) - The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is a - map of strings and should only be used with other chat query fields. + The messages field used ONLY for __chat external & foundation model__ serving endpoints. This is an + array of ChatMessage objects and should only be used with other chat query fields. :param n: int (optional) The n (number of candidates) field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is an integer between 1 and 5 with a default of 1 and should only be diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 1a7c88de9..fd81e1b24 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -101,25 +101,12 @@ .. code-block:: - import time - from databricks.sdk import WorkspaceClient + from databricks.sdk.service import sharing w = WorkspaceClient() - public_share_recipient = """{ - "shareCredentialsVersion":1, - "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", - "endpoint":"https://sharing.delta.io/delta-sharing/" - } - """ - - created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) - - shares = w.providers.list_shares(name=created.name) - - # cleanup - w.providers.delete(name=created.name) + all = w.providers.list(sharing.ListProvidersRequest()) Gets an array of available authentication providers. The caller must either be a metastore admin or the owner of the providers. Providers not owned by the caller are not included in the response. There diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 0dfb63fbf..f0081b3f2 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SELECT 1", + query_text="SHOW TABLES", ) ) diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 03dae035c..4fba581e8 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -178,7 +178,7 @@ content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), format=workspace.ImportFormat.SOURCE, language=workspace.Language.SQL, - overwrite=true_, + overwrite=True, path=notebook_path, )
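
A few usage sketches of the newly added fields, assuming the SDK surface shown in this diff. First, the new `email_notifications` argument on serving-endpoint creation. This is a minimal sketch, not a tested recipe: the endpoint name, served entity, and notification addresses are placeholders, and `EndpointCoreConfigInput`/`ServedEntityInput` are assumed to be available in `databricks.sdk.service.serving` as in current SDK releases.

```python
import time

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.serving import (
    EmailNotifications,
    EndpointCoreConfigInput,
    ServedEntityInput,
)

w = WorkspaceClient()

# Notify these addresses when an endpoint config update succeeds or fails.
notifications = EmailNotifications(
    on_update_success=["ml-team@example.com"],
    on_update_failure=["ml-oncall@example.com"],
)

created = w.serving_endpoints.create_and_wait(
    name=f"sdk-{time.time_ns()}",
    config=EndpointCoreConfigInput(
        served_entities=[
            ServedEntityInput(
                entity_name="main.default.my_model",  # placeholder UC model
                entity_version="1",
                workload_size="Small",
                scale_to_zero_enabled=True,
            )
        ]
    ),
    email_notifications=notifications,
)

# cleanup
w.serving_endpoints.delete(name=created.name)
```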
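Next, a self-contained sketch of how the new `table` trigger state round-trips through the `TableState`/`TableTriggerState` dataclasses added above; the payload values are invented for illustration.

```python
from databricks.sdk.service.jobs import TriggerStateProto

# Example payload shaped like the new `table` trigger state (values are illustrative).
payload = {
    "table": {
        "using_scalable_monitoring": False,
        "last_seen_table_states": [
            {"table_name": "mycatalog.myschema.mytable", "has_seen_updates": True},
        ],
    }
}

state = TriggerStateProto.from_dict(payload)
print(state.table.using_scalable_monitoring)             # False
print(state.table.last_seen_table_states[0].table_name)  # mycatalog.myschema.mytable

# Serializing back produces the same nested structure.
assert state.as_dict() == payload
```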
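Finally, the new `effective_*` fields on `SyncedDatabaseTable` are plain optional strings that appear when a table response is deserialized. A minimal sketch with a made-up response body; real values come from the Database Tables API.

```python
from databricks.sdk.service.database import SyncedDatabaseTable

# Hypothetical response body for illustration only.
table = SyncedDatabaseTable.from_dict(
    {
        "name": "main.default.my_synced_table",
        "database_instance_name": "my-instance",
        "effective_database_instance_name": "my-instance",
        "effective_logical_database_name": "databricks_postgres",
    }
)

print(table.effective_database_instance_name)  # my-instance
print(table.effective_logical_database_name)   # databricks_postgres
```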