diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 7a9cd634a..cbb2ed4ae 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -9b38571bfe7bf0bc595480f28eb93a8db3116985 \ No newline at end of file +8921a828d1741af0952eb5c4f0292c194c0d5f38 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 7d0079d78..202bd175c 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -16,3 +16,16 @@ * Add `table_deltasharing_open_dir_based` enum value for `databricks.sdk.service.catalog.SecurableKind`. * Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.NccPrivateEndpointRulePrivateLinkConnectionState`. * [Breaking] Remove `access_modes` and `storage_location` fields for `databricks.sdk.service.sharing.Table`. +* Add `error_message` field for `databricks.sdk.service.settings.CreatePrivateEndpointRule`. +* Add `error_message` field for `databricks.sdk.service.settings.NccPrivateEndpointRule`. +* Add `error_message` field for `databricks.sdk.service.settings.UpdatePrivateEndpointRule`. +* Add `rate_limited` enum value for `databricks.sdk.service.compute.TerminationReasonCode`. +* Add `rate_limited` enum value for `databricks.sdk.service.sql.TerminationReasonCode`. +* [Breaking] Add long-running operation configuration for [PostgresAPI.delete_branch](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/postgres/postgres.html#databricks.sdk.service.postgres.PostgresAPI.delete_branch) method. +* [Breaking] Add long-running operation configuration for [PostgresAPI.delete_endpoint](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/postgres/postgres.html#databricks.sdk.service.postgres.PostgresAPI.delete_endpoint) method. +* [Breaking] Add long-running operation configuration for [PostgresAPI.delete_project](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/postgres/postgres.html#databricks.sdk.service.postgres.PostgresAPI.delete_project) method. +* [Breaking] Change `delete_branch()`, `delete_endpoint()` and `delete_project()` methods for [w.postgres](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/postgres/postgres.html) workspace-level service to return `databricks.sdk.service.postgres.Operation` dataclass. +* [Breaking] Remove `pgbouncer_settings` field for `databricks.sdk.service.postgres.EndpointSettings`. +* [Breaking] Remove `pooler_mode` field for `databricks.sdk.service.postgres.EndpointSpec`. +* [Breaking] Remove `pooler_mode` field for `databricks.sdk.service.postgres.EndpointStatus`. +* [Breaking] Remove `pgbouncer_settings` field for `databricks.sdk.service.postgres.ProjectDefaultEndpointSettings`. 
\ No newline at end of file diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index dcf680a8c..772c4336b 100644 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -441,7 +441,7 @@ def alerts_v2(self) -> pkg_sql.AlertsV2API: @property def apps(self) -> pkg_apps.AppsAPI: - """Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" + """Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" return self._apps @property @@ -796,7 +796,7 @@ def providers(self) -> pkg_sharing.ProvidersAPI: @property def quality_monitor_v2(self) -> pkg_qualitymonitorv2.QualityMonitorV2API: - """Manage data quality of UC objects (currently support `schema`).""" + """[DEPRECATED] This API is deprecated.""" return self._quality_monitor_v2 @property diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index f0855a454..2011942ea 100644 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -2019,7 +2019,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ListCustomTemplatesResponse: class AppsAPI: - """Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend + """Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" def __init__(self, api_client): diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 7c1834665..b6da68ddb 100644 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -7195,6 +7195,7 @@ class TerminationReasonCode(Enum): NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE" POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE" POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE" + RATE_LIMITED = "RATE_LIMITED" REQUEST_REJECTED = "REQUEST_REJECTED" REQUEST_THROTTLED = "REQUEST_THROTTLED" RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED" diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py old mode 100755 new mode 100644 index 3487bd80c..722bc9643 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -164,30 +164,46 @@ class DatabaseInstance: responses.""" effective_capacity: Optional[str] = None - """Deprecated. The sku of the instance; this field will always match the value of capacity.""" + """Deprecated. The sku of the instance; this field will always match the value of capacity. This is + an output only field that contains the value computed from the input field combined with server + side defaults. Use the field without the effective_ prefix to set the value.""" effective_custom_tags: Optional[List[CustomTag]] = None - """The recorded custom tags associated with the instance.""" + """The recorded custom tags associated with the instance. This is an output only field that + contains the value computed from the input field combined with server side defaults. Use the + field without the effective_ prefix to set the value.""" effective_enable_pg_native_login: Optional[bool] = None - """Whether the instance has PG native password login enabled.""" + """Whether the instance has PG native password login enabled. 
This is an output only field that + contains the value computed from the input field combined with server side defaults. Use the + field without the effective_ prefix to set the value.""" effective_enable_readable_secondaries: Optional[bool] = None - """Whether secondaries serving read-only traffic are enabled. Defaults to false.""" + """Whether secondaries serving read-only traffic are enabled. Defaults to false. This is an output + only field that contains the value computed from the input field combined with server side + defaults. Use the field without the effective_ prefix to set the value.""" effective_node_count: Optional[int] = None """The number of nodes in the instance, composed of 1 primary and 0 or more secondaries. Defaults - to 1 primary and 0 secondaries.""" + to 1 primary and 0 secondaries. This is an output only field that contains the value computed + from the input field combined with server side defaults. Use the field without the effective_ + prefix to set the value.""" effective_retention_window_in_days: Optional[int] = None """The retention window for the instance. This is the time window in days for which the historical - data is retained.""" + data is retained. This is an output only field that contains the value computed from the input + field combined with server side defaults. Use the field without the effective_ prefix to set the + value.""" effective_stopped: Optional[bool] = None - """Whether the instance is stopped.""" + """Whether the instance is stopped. This is an output only field that contains the value computed + from the input field combined with server side defaults. Use the field without the effective_ + prefix to set the value.""" effective_usage_policy_id: Optional[str] = None - """The policy that is applied to the instance.""" + """The policy that is applied to the instance. This is an output only field that contains the value + computed from the input field combined with server side defaults. Use the field without the + effective_ prefix to set the value.""" enable_pg_native_login: Optional[bool] = None """Whether to enable PG native password login on the instance. Defaults to false.""" @@ -397,7 +413,9 @@ class DatabaseInstanceRef: effective_lsn: Optional[str] = None """For a parent ref instance, this is the LSN on the parent instance from which the instance was created. For a child ref instance, this is the LSN on the instance from which the child instance - was created.""" + was created. This is an output only field that contains the value computed from the input field + combined with server side defaults. Use the field without the effective_ prefix to set the + value.""" lsn: Optional[str] = None """User-specified WAL LSN of the ref database instance. @@ -464,7 +482,9 @@ class DatabaseInstanceRole: """The desired API-exposed Postgres role attribute to associate with the role. Optional.""" effective_attributes: Optional[DatabaseInstanceRoleAttributes] = None - """The attributes that are applied to the role.""" + """The attributes that are applied to the role. This is an output only field that contains the + value computed from the input field combined with server side defaults. Use the field without + the effective_ prefix to set the value.""" identity_type: Optional[DatabaseInstanceRoleIdentityType] = None """The type of the role.""" @@ -979,10 +999,14 @@ class SyncedDatabaseTable: effective_database_instance_name: Optional[str] = None """The name of the database instance that this table is registered to. 
This field is always returned, and for tables inside database catalogs is inferred database instance associated with - the catalog.""" + the catalog. This is an output only field that contains the value computed from the input field + combined with server side defaults. Use the field without the effective_ prefix to set the + value.""" effective_logical_database_name: Optional[str] = None - """The name of the logical database that this table is registered to.""" + """The name of the logical database that this table is registered to. This is an output only field + that contains the value computed from the input field combined with server side defaults. Use + the field without the effective_ prefix to set the value.""" logical_database_name: Optional[str] = None """Target Postgres database object (logical database) name for this table. diff --git a/databricks/sdk/service/postgres.py b/databricks/sdk/service/postgres.py index 435d69b74..397a9d722 100644 --- a/databricks/sdk/service/postgres.py +++ b/databricks/sdk/service/postgres.py @@ -282,7 +282,6 @@ class DatabricksServiceExceptionWithDetailsProto: """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None - """@pbjson-skip""" error_code: Optional[ErrorCode] = None @@ -421,12 +420,6 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointOperationMetadata: return cls() -class EndpointPoolerMode(Enum): - """The connection pooler mode. Lakebase supports PgBouncer in `transaction` mode only.""" - - TRANSACTION = "TRANSACTION" - - @dataclass class EndpointSettings: """A collection of settings for a compute endpoint.""" @@ -434,16 +427,11 @@ class EndpointSettings: pg_settings: Optional[Dict[str, str]] = None """A raw representation of Postgres settings.""" - pgbouncer_settings: Optional[Dict[str, str]] = None - """A raw representation of PgBouncer settings.""" - def as_dict(self) -> dict: """Serializes the EndpointSettings into a dictionary suitable for use as a JSON request body.""" body = {} if self.pg_settings: body["pg_settings"] = self.pg_settings - if self.pgbouncer_settings: - body["pgbouncer_settings"] = self.pgbouncer_settings return body def as_shallow_dict(self) -> dict: @@ -451,14 +439,12 @@ def as_shallow_dict(self) -> dict: body = {} if self.pg_settings: body["pg_settings"] = self.pg_settings - if self.pgbouncer_settings: - body["pgbouncer_settings"] = self.pgbouncer_settings return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> EndpointSettings: """Deserializes the EndpointSettings from a dictionary.""" - return cls(pg_settings=d.get("pg_settings", None), pgbouncer_settings=d.get("pgbouncer_settings", None)) + return cls(pg_settings=d.get("pg_settings", None)) @dataclass @@ -477,8 +463,6 @@ class EndpointSpec: suspend compute operation. 
A disabled compute endpoint cannot be enabled by a connection or console action.""" - pooler_mode: Optional[EndpointPoolerMode] = None - settings: Optional[EndpointSettings] = None suspend_timeout_duration: Optional[Duration] = None @@ -495,8 +479,6 @@ def as_dict(self) -> dict: body["disabled"] = self.disabled if self.endpoint_type is not None: body["endpoint_type"] = self.endpoint_type.value - if self.pooler_mode is not None: - body["pooler_mode"] = self.pooler_mode.value if self.settings: body["settings"] = self.settings.as_dict() if self.suspend_timeout_duration is not None: @@ -514,8 +496,6 @@ def as_shallow_dict(self) -> dict: body["disabled"] = self.disabled if self.endpoint_type is not None: body["endpoint_type"] = self.endpoint_type - if self.pooler_mode is not None: - body["pooler_mode"] = self.pooler_mode if self.settings: body["settings"] = self.settings if self.suspend_timeout_duration is not None: @@ -530,7 +510,6 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointSpec: autoscaling_limit_min_cu=d.get("autoscaling_limit_min_cu", None), disabled=d.get("disabled", None), endpoint_type=_enum(d, "endpoint_type", EndpointType), - pooler_mode=_enum(d, "pooler_mode", EndpointPoolerMode), settings=_from_dict(d, "settings", EndpointSettings), suspend_timeout_duration=_duration(d, "suspend_timeout_duration"), ) @@ -563,8 +542,6 @@ class EndpointStatus: pending_state: Optional[EndpointStatusState] = None - pooler_mode: Optional[EndpointPoolerMode] = None - settings: Optional[EndpointSettings] = None start_time: Optional[Timestamp] = None @@ -595,8 +572,6 @@ def as_dict(self) -> dict: body["last_active_time"] = self.last_active_time.ToJsonString() if self.pending_state is not None: body["pending_state"] = self.pending_state.value - if self.pooler_mode is not None: - body["pooler_mode"] = self.pooler_mode.value if self.settings: body["settings"] = self.settings.as_dict() if self.start_time is not None: @@ -626,8 +601,6 @@ def as_shallow_dict(self) -> dict: body["last_active_time"] = self.last_active_time if self.pending_state is not None: body["pending_state"] = self.pending_state - if self.pooler_mode is not None: - body["pooler_mode"] = self.pooler_mode if self.settings: body["settings"] = self.settings if self.start_time is not None: @@ -650,7 +623,6 @@ def from_dict(cls, d: Dict[str, Any]) -> EndpointStatus: host=d.get("host", None), last_active_time=_timestamp(d, "last_active_time"), pending_state=_enum(d, "pending_state", EndpointStatusState), - pooler_mode=_enum(d, "pooler_mode", EndpointPoolerMode), settings=_from_dict(d, "settings", EndpointSettings), start_time=_timestamp(d, "start_time"), suspend_time=_timestamp(d, "suspend_time"), @@ -1035,9 +1007,6 @@ class ProjectDefaultEndpointSettings: pg_settings: Optional[Dict[str, str]] = None """A raw representation of Postgres settings.""" - pgbouncer_settings: Optional[Dict[str, str]] = None - """A raw representation of PgBouncer settings.""" - suspend_timeout_duration: Optional[Duration] = None """Duration of inactivity after which the compute endpoint is automatically suspended.""" @@ -1050,8 +1019,6 @@ def as_dict(self) -> dict: body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu if self.pg_settings: body["pg_settings"] = self.pg_settings - if self.pgbouncer_settings: - body["pgbouncer_settings"] = self.pgbouncer_settings if self.suspend_timeout_duration is not None: body["suspend_timeout_duration"] = self.suspend_timeout_duration.ToJsonString() return body @@ -1065,8 +1032,6 @@ def as_shallow_dict(self) -> dict: 
body["autoscaling_limit_min_cu"] = self.autoscaling_limit_min_cu if self.pg_settings: body["pg_settings"] = self.pg_settings - if self.pgbouncer_settings: - body["pgbouncer_settings"] = self.pgbouncer_settings if self.suspend_timeout_duration is not None: body["suspend_timeout_duration"] = self.suspend_timeout_duration return body @@ -1078,7 +1043,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ProjectDefaultEndpointSettings: autoscaling_limit_max_cu=d.get("autoscaling_limit_max_cu", None), autoscaling_limit_min_cu=d.get("autoscaling_limit_min_cu", None), pg_settings=d.get("pg_settings", None), - pgbouncer_settings=d.get("pgbouncer_settings", None), suspend_timeout_duration=_duration(d, "suspend_timeout_duration"), ) @@ -1567,51 +1531,57 @@ def create_role(self, parent: str, role: Role, role_id: str) -> CreateRoleOperat operation = Operation.from_dict(res) return CreateRoleOperation(self, operation) - def delete_branch(self, name: str): + def delete_branch(self, name: str) -> DeleteBranchOperation: """Delete a Branch. :param name: str The name of the Branch to delete. Format: projects/{project_id}/branches/{branch_id} - + :returns: :class:`Operation` """ headers = { "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + res = self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + operation = Operation.from_dict(res) + return DeleteBranchOperation(self, operation) - def delete_endpoint(self, name: str): + def delete_endpoint(self, name: str) -> DeleteEndpointOperation: """Delete an Endpoint. :param name: str The name of the Endpoint to delete. Format: projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} - + :returns: :class:`Operation` """ headers = { "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + res = self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + operation = Operation.from_dict(res) + return DeleteEndpointOperation(self, operation) - def delete_project(self, name: str): + def delete_project(self, name: str) -> DeleteProjectOperation: """Delete a Project. :param name: str The name of the Project to delete. Format: projects/{project_id} - + :returns: :class:`Operation` """ headers = { "Accept": "application/json", } - self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + res = self._api.do("DELETE", f"/api/2.0/postgres/{name}", headers=headers) + operation = Operation.from_dict(res) + return DeleteProjectOperation(self, operation) def delete_role(self, name: str, *, reassign_owned_to: Optional[str] = None) -> DeleteRoleOperation: """Delete a role in a branch. @@ -2248,6 +2218,231 @@ def done(self) -> bool: return operation.done +class DeleteBranchOperation: + """Long-running operation for delete_branch""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None): + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. 
+ + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`Any /* MISSING TYPE */` + """ + + def poll_operation(): + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + return {}, None + + poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> BranchOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`BranchOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return BranchOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class DeleteEndpointOperation: + """Long-running operation for delete_endpoint""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None): + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`Any /* MISSING TYPE */` + """ + + def poll_operation(): + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + return {}, None + + poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. 
The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> EndpointOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`EndpointOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return EndpointOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + +class DeleteProjectOperation: + """Long-running operation for delete_project""" + + def __init__(self, impl: PostgresAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None): + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. + + :param opts: :class:`LroOptions` + Timeout options (default: polls indefinitely) + + :returns: :class:`Any /* MISSING TYPE */` + """ + + def poll_operation(): + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + return {}, None + + poll(poll_operation, timeout=opts.timeout if opts is not None else None) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> ProjectOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`ProjectOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return ProjectOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. 
+ + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done + + class DeleteRoleOperation: """Long-running operation for delete_role""" diff --git a/databricks/sdk/service/qualitymonitorv2.py b/databricks/sdk/service/qualitymonitorv2.py index 2e0c00008..13dfae0b5 100644 --- a/databricks/sdk/service/qualitymonitorv2.py +++ b/databricks/sdk/service/qualitymonitorv2.py @@ -147,13 +147,14 @@ def from_dict(cls, d: Dict[str, Any]) -> QualityMonitor: class QualityMonitorV2API: - """Manage data quality of UC objects (currently support `schema`)""" + """[DEPRECATED] This API is deprecated. Please use the Data Quality Monitoring API instead (REST: + /api/data-quality/v1/monitors). Manage data quality of UC objects (currently support `schema`).""" def __init__(self, api_client): self._api = api_client def create_quality_monitor(self, quality_monitor: QualityMonitor) -> QualityMonitor: - """Create a quality monitor on UC object + """[DEPRECATED] Create a quality monitor on UC object. Use Data Quality Monitoring API instead. :param quality_monitor: :class:`QualityMonitor` @@ -170,7 +171,7 @@ def create_quality_monitor(self, quality_monitor: QualityMonitor) -> QualityMoni return QualityMonitor.from_dict(res) def delete_quality_monitor(self, object_type: str, object_id: str): - """Delete a quality monitor on UC object + """[DEPRECATED] Delete a quality monitor on UC object. Use Data Quality Monitoring API instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. @@ -187,7 +188,7 @@ def delete_quality_monitor(self, object_type: str, object_id: str): self._api.do("DELETE", f"/api/2.0/quality-monitors/{object_type}/{object_id}", headers=headers) def get_quality_monitor(self, object_type: str, object_id: str) -> QualityMonitor: - """Read a quality monitor on UC object + """[DEPRECATED] Read a quality monitor on UC object. Use Data Quality Monitoring API instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. @@ -207,7 +208,7 @@ def get_quality_monitor(self, object_type: str, object_id: str) -> QualityMonito def list_quality_monitor( self, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[QualityMonitor]: - """(Unimplemented) List quality monitors + """[DEPRECATED] (Unimplemented) List quality monitors. Use Data Quality Monitoring API instead. :param page_size: int (optional) :param page_token: str (optional) @@ -236,7 +237,8 @@ def list_quality_monitor( def update_quality_monitor( self, object_type: str, object_id: str, quality_monitor: QualityMonitor ) -> QualityMonitor: - """(Unimplemented) Update a quality monitor on UC object + """[DEPRECATED] (Unimplemented) Update a quality monitor on UC object. Use Data Quality Monitoring API + instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index e9263267b..9acb5af4e 100644 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -846,6 +846,8 @@ class CreatePrivateEndpointRule: """The full target AWS endpoint service name that connects to the destination resources of the private endpoint.""" + error_message: Optional[str] = None + group_id: Optional[str] = None """Not used by customer-managed private endpoint services. 
@@ -869,6 +871,8 @@ def as_dict(self) -> dict: body["domain_names"] = [v for v in self.domain_names] if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: @@ -884,6 +888,8 @@ def as_shallow_dict(self) -> dict: body["domain_names"] = self.domain_names if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: @@ -898,6 +904,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CreatePrivateEndpointRule: return cls( domain_names=d.get("domain_names", None), endpoint_service=d.get("endpoint_service", None), + error_message=d.get("error_message", None), group_id=d.get("group_id", None), resource_id=d.get("resource_id", None), resource_names=d.get("resource_names", None), @@ -4058,6 +4065,8 @@ class NccPrivateEndpointRule: """The full target AWS endpoint service name that connects to the destination resources of the private endpoint.""" + error_message: Optional[str] = None + group_id: Optional[str] = None """Not used by customer-managed private endpoint services. @@ -4108,6 +4117,8 @@ def as_dict(self) -> dict: body["endpoint_name"] = self.endpoint_name if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message if self.group_id is not None: body["group_id"] = self.group_id if self.network_connectivity_config_id is not None: @@ -4145,6 +4156,8 @@ def as_shallow_dict(self) -> dict: body["endpoint_name"] = self.endpoint_name if self.endpoint_service is not None: body["endpoint_service"] = self.endpoint_service + if self.error_message is not None: + body["error_message"] = self.error_message if self.group_id is not None: body["group_id"] = self.group_id if self.network_connectivity_config_id is not None: @@ -4174,6 +4187,7 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: enabled=d.get("enabled", None), endpoint_name=d.get("endpoint_name", None), endpoint_service=d.get("endpoint_service", None), + error_message=d.get("error_message", None), group_id=d.get("group_id", None), network_connectivity_config_id=d.get("network_connectivity_config_id", None), resource_id=d.get("resource_id", None), @@ -5143,6 +5157,8 @@ class UpdatePrivateEndpointRule: Update this field to activate/deactivate this private endpoint to allow egress access from serverless compute resources.""" + error_message: Optional[str] = None + resource_names: Optional[List[str]] = None """Only used by private endpoints towards AWS S3 service. 
@@ -5157,6 +5173,8 @@ def as_dict(self) -> dict: body["domain_names"] = [v for v in self.domain_names] if self.enabled is not None: body["enabled"] = self.enabled + if self.error_message is not None: + body["error_message"] = self.error_message if self.resource_names: body["resource_names"] = [v for v in self.resource_names] return body @@ -5168,6 +5186,8 @@ def as_shallow_dict(self) -> dict: body["domain_names"] = self.domain_names if self.enabled is not None: body["enabled"] = self.enabled + if self.error_message is not None: + body["error_message"] = self.error_message if self.resource_names: body["resource_names"] = self.resource_names return body @@ -5178,6 +5198,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdatePrivateEndpointRule: return cls( domain_names=d.get("domain_names", None), enabled=d.get("enabled", None), + error_message=d.get("error_message", None), resource_names=d.get("resource_names", None), ) diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 78c8688d0..51cb09e2d 100644 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -6392,6 +6392,7 @@ class TerminationReasonCode(Enum): NPIP_TUNNEL_TOKEN_FAILURE = "NPIP_TUNNEL_TOKEN_FAILURE" POD_ASSIGNMENT_FAILURE = "POD_ASSIGNMENT_FAILURE" POD_SCHEDULING_FAILURE = "POD_SCHEDULING_FAILURE" + RATE_LIMITED = "RATE_LIMITED" REQUEST_REJECTED = "REQUEST_REJECTED" REQUEST_THROTTLED = "REQUEST_THROTTLED" RESOURCE_USAGE_BLOCKED = "RESOURCE_USAGE_BLOCKED" diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 133b16f3d..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -74,9 +74,9 @@ spn_id = spn.id - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - _ = a.workspace_assignment.update( + a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 25ee5abaa..b9f080e36 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,6 +16,7 @@ .. code-block:: + import os import time from databricks.sdk import AccountClient @@ -25,8 +26,11 @@ storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), ) + + # cleanup + a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 6009322a0..c946f8e32 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1462,6 +1462,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POD_SCHEDULING_FAILURE :value: "POD_SCHEDULING_FAILURE" + .. py:attribute:: RATE_LIMITED + :value: "RATE_LIMITED" + .. py:attribute:: REQUEST_REJECTED :value: "REQUEST_REJECTED" diff --git a/docs/dbdataclasses/postgres.rst b/docs/dbdataclasses/postgres.rst index 00e697654..d3605a58e 100644 --- a/docs/dbdataclasses/postgres.rst +++ b/docs/dbdataclasses/postgres.rst @@ -51,13 +51,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. 
py:class:: EndpointPoolerMode - - The connection pooler mode. Lakebase supports PgBouncer in `transaction` mode only. - - .. py:attribute:: TRANSACTION - :value: "TRANSACTION" - .. autoclass:: EndpointSettings :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index a5673b8f8..bd34d9227 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1542,6 +1542,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POD_SCHEDULING_FAILURE :value: "POD_SCHEDULING_FAILURE" + .. py:attribute:: RATE_LIMITED + :value: "RATE_LIMITED" + .. py:attribute:: REQUEST_REJECTED :value: "REQUEST_REJECTED" diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index 62cc0c22d..462ddd43a 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -4,7 +4,7 @@ .. py:class:: AppsAPI - Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend + Apps run directly on a customer's Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. .. py:method:: create(app: App [, no_compute: Optional[bool]]) -> Wait[App] diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 77de87dc4..17297d8dd 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created = w.catalogs.create(name=f"sdk-{time.time_ns()}") + created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created.name, force=True) + w.catalogs.delete(name=created_catalog.name, force=True) Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index acaac095d..56ce8ce77 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -30,20 +30,22 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + storage_credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + comment="created via SDK", ) - created = w.external_locations.create( + external_location = w.external_locations.create( name=f"sdk-{time.time_ns()}", - credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + credential_name=storage_credential.name, + comment="created via SDK", + url="s3://" + os.environ["TEST_BUCKET"] + "/" + f"sdk-{time.time_ns()}", ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(name=storage_credential.name) + w.external_locations.delete(name=external_location.name) Creates a new external location entry in the metastore. 
The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage @@ -105,20 +107,20 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) - _ = w.external_locations.get(get=created.name) + _ = w.external_locations.get(name=created.name) # cleanup - w.storage_credentials.delete(delete=credential.name) - w.external_locations.delete(delete=created.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. @@ -191,24 +193,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index ad6e4ebe5..df7d2bf2b 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,14 @@ w = WorkspaceClient() - created = w.storage_credentials.create( + storage_credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + comment="created via SDK", ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=storage_credential.name) Creates a new storage credential. 
@@ -98,13 +99,13 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) - by_name = w.storage_credentials.get(name=created.name) + by_name = w.storage_credentials.get(get=created.name) # cleanup - w.storage_credentials.delete(name=created.name) + w.storage_credentials.delete(delete=created.name) Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential. @@ -123,10 +124,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list() + all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore @@ -172,17 +174,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Updates a storage credential on the metastore. diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index 8619a5e9a..463e34d0a 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -645,11 +645,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import compute w = WorkspaceClient() - all = w.clusters.list(compute.ListClustersRequest()) + nodes = w.clusters.list_node_types() Return information about all pinned and active clusters, and all clusters terminated within the last 30 days. Clusters terminated prior to this period are not included. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 224a89722..e5798e3b4 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -359,23 +359,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. @@ -524,37 +522,11 @@ .. 
code-block:: - import os - import time - from databricks.sdk import WorkspaceClient - from databricks.sdk.service import jobs w = WorkspaceClient() - notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - - cluster_id = ( - w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] - ) - - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", - tasks=[ - jobs.Task( - description="test", - existing_cluster_id=cluster_id, - notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, - ) - ], - ) - - run_list = w.jobs.list_runs(job_id=created_job.job_id) - - # cleanup - w.jobs.delete(job_id=created_job.job_id) + job_list = w.jobs.list(expand_tasks=False) List jobs. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index e416ac56b..c528f4329 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,9 +90,7 @@ w = WorkspaceClient() - model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - - mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -736,13 +734,14 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - model = w.model_registry.get_model(name=created.registered_model.name) + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") - w.model_registry.update_model( - name=model.registered_model_databricks.name, + w.model_registry.update_model_version( description=f"sdk-{time.time_ns()}", + name=created.model_version.name, + version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/postgres/postgres.rst b/docs/workspace/postgres/postgres.rst index 99fbc50e2..d356ef6a0 100644 --- a/docs/workspace/postgres/postgres.rst +++ b/docs/workspace/postgres/postgres.rst @@ -71,17 +71,17 @@ :returns: :class:`Operation` - .. py:method:: delete_branch(name: str) + .. py:method:: delete_branch(name: str) -> DeleteBranchOperation Delete a Branch. :param name: str The name of the Branch to delete. Format: projects/{project_id}/branches/{branch_id} - + :returns: :class:`Operation` - .. py:method:: delete_endpoint(name: str) + .. py:method:: delete_endpoint(name: str) -> DeleteEndpointOperation Delete an Endpoint. @@ -89,17 +89,17 @@ The name of the Endpoint to delete. Format: projects/{project_id}/branches/{branch_id}/endpoints/{endpoint_id} - + :returns: :class:`Operation` - .. py:method:: delete_project(name: str) + .. py:method:: delete_project(name: str) -> DeleteProjectOperation Delete a Project. :param name: str The name of the Project to delete. Format: projects/{project_id} - + :returns: :class:`Operation` .. py:method:: delete_role(name: str [, reassign_owned_to: Optional[str]]) -> DeleteRoleOperation diff --git a/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst index 006d137da..25ceaef3e 100644 --- a/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst +++ b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst @@ -4,11 +4,12 @@ .. 
py:class:: QualityMonitorV2API - Manage data quality of UC objects (currently support `schema`) + [DEPRECATED] This API is deprecated. Please use the Data Quality Monitoring API instead (REST: + /api/data-quality/v1/monitors). Manage data quality of UC objects (currently support `schema`). .. py:method:: create_quality_monitor(quality_monitor: QualityMonitor) -> QualityMonitor - Create a quality monitor on UC object + [DEPRECATED] Create a quality monitor on UC object. Use Data Quality Monitoring API instead. :param quality_monitor: :class:`QualityMonitor` @@ -17,7 +18,7 @@ .. py:method:: delete_quality_monitor(object_type: str, object_id: str) - Delete a quality monitor on UC object + [DEPRECATED] Delete a quality monitor on UC object. Use Data Quality Monitoring API instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. @@ -29,7 +30,7 @@ .. py:method:: get_quality_monitor(object_type: str, object_id: str) -> QualityMonitor - Read a quality monitor on UC object + [DEPRECATED] Read a quality monitor on UC object. Use Data Quality Monitoring API instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. @@ -41,7 +42,7 @@ .. py:method:: list_quality_monitor( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[QualityMonitor] - (Unimplemented) List quality monitors + [DEPRECATED] (Unimplemented) List quality monitors. Use Data Quality Monitoring API instead. :param page_size: int (optional) :param page_token: str (optional) @@ -51,7 +52,8 @@ .. py:method:: update_quality_monitor(object_type: str, object_id: str, quality_monitor: QualityMonitor) -> QualityMonitor - (Unimplemented) Update a quality monitor on UC object + [DEPRECATED] (Unimplemented) Update a quality monitor on UC object. Use Data Quality Monitoring API + instead. :param object_type: str The type of the monitored object. Can be one of the following: schema. diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 302039578..8f66df54a 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -101,12 +101,25 @@ .. code-block:: + import time + from databricks.sdk import WorkspaceClient - from databricks.sdk.service import sharing w = WorkspaceClient() - all = w.providers.list(sharing.ListProvidersRequest()) + public_share_recipient = """{ + "shareCredentialsVersion":1, + "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", + "endpoint":"https://sharing.delta.io/delta-sharing/" + } + """ + + created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) + + shares = w.providers.list_shares(name=created.name) + + # cleanup + w.providers.delete(name=created.name) Gets an array of available authentication providers. The caller must either be a metastore admin, have the **USE_PROVIDER** privilege on the providers, or be the owner of the providers. Providers not owned diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index 694026c28..7303aa39d 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -79,7 +79,7 @@ notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) + export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. 
diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py old mode 100755 new mode 100644 index c06822e8f..7987ef129 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -22,7 +22,6 @@ class DatabricksServiceExceptionWithDetailsProto: """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None - """@pbjson-skip""" error_code: Optional[ErrorCode] = None
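
Usage sketch (not part of the diff): with this change, the postgres `delete_branch()`, `delete_endpoint()` and `delete_project()` methods return long-running-operation wrappers instead of `None`, so callers can block on completion. A minimal example based on the signatures added above; the branch name is a placeholder, and the timeout comment follows the `wait(opts)` signature shown in `DeleteBranchOperation`.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Previously returned None; now returns a DeleteBranchOperation.
    op = w.postgres.delete_branch(name="projects/my-project/branches/my-branch")

    # Block until the server-side operation finishes. wait() polls
    # PostgresAPI.get_operation; with no options it polls indefinitely,
    # and it raises TimeoutError only if an lro.LroOptions timeout is
    # passed and exceeded.
    op.wait()

    print(op.name(), op.done())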