From db2d97757cc72ccc3727d74d8ef49c734d45025f Mon Sep 17 00:00:00 2001
From: Tanmay Rustagi
Date: Thu, 23 Oct 2025 02:15:08 +0530
Subject: [PATCH] [Internal] Update SDK to latest spec

---
 .codegen/_openapi_sha                         |   2 +-
 NEXT_CHANGELOG.md                             |  18 ++
 databricks/sdk/service/catalog.py             |   9 +-
 databricks/sdk/service/compute.py             |  10 +
 databricks/sdk/service/dataquality.py         | 249 ++++++++++++---
 databricks/sdk/service/jobs.py                | 111 ++++---
 databricks/sdk/service/ml.py                  | 297 ++++++++++++++++++
 databricks/sdk/service/oauth2.py              |  28 +-
 databricks/sdk/service/pipelines.py           |   4 +-
 databricks/sdk/service/provisioning.py        |  11 +-
 databricks/sdk/service/settings.py            |   2 +
 databricks/sdk/service/sharing.py             |  38 ---
 databricks/sdk/service/sql.py                 |  75 +++--
 docs/account/provisioning/workspaces.rst      |   8 +-
 docs/dbdataclasses/catalog.rst                |   9 +-
 docs/dbdataclasses/compute.rst                |  18 ++
 docs/dbdataclasses/ml.rst                     |  23 ++
 docs/dbdataclasses/pipelines.rst              |   2 +-
 docs/dbdataclasses/settings.rst               |   6 +
 docs/dbdataclasses/sql.rst                    |  18 ++
 docs/workspace/dataquality/data_quality.rst   | 216 ++++++++++---
 docs/workspace/jobs/jobs.rst                  |  48 ++-
 docs/workspace/ml/feature_engineering.rst     |  61 ++++
 .../sharing/recipient_federation_policies.rst |  20 --
 tests/databricks/sdk/service/lrotesting.py    |  91 ++++++
 tests/generated/test_http_call.py             |   4 +-
 26 files changed, 1113 insertions(+), 265 deletions(-)

diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index 20842dced..aa1180c39 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-c4784cea599325a13472b1455e7434d639362d8b
\ No newline at end of file
+b54bbd860200d735fa2c306ec1559090625370e6
\ No newline at end of file
diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index a359f03d2..44865bfdb 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -12,3 +12,21 @@
 ### Internal Changes
 
 ### API Changes
+* Add `create_materialized_feature()`, `delete_materialized_feature()`, `get_materialized_feature()`, `list_materialized_features()` and `update_materialized_feature()` methods for [w.feature_engineering](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/ml/feature_engineering.html) workspace-level service.
+* Add `filter_condition` field for `databricks.sdk.service.ml.Feature`.
+* Add `absolute_session_lifetime_in_minutes` and `enable_single_use_refresh_tokens` fields for `databricks.sdk.service.oauth2.TokenAccessPolicy`.
+* Add `network_connectivity_config_id` field for `databricks.sdk.service.provisioning.CreateWorkspaceRequest`.
+* Add `oauth_mtls` enum value for `databricks.sdk.service.catalog.CredentialType`.
+* Add `network_check_nic_failure_due_to_misconfig`, `network_check_dns_server_failure_due_to_misconfig`, `network_check_storage_failure_due_to_misconfig`, `network_check_metadata_endpoint_failure_due_to_misconfig`, `network_check_control_plane_failure_due_to_misconfig` and `network_check_multiple_components_failure_due_to_misconfig` enum values for `databricks.sdk.service.compute.TerminationReasonCode`.
+* Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.NccPrivateEndpointRulePrivateLinkConnectionState`.
+* Add `network_check_nic_failure_due_to_misconfig`, `network_check_dns_server_failure_due_to_misconfig`, `network_check_storage_failure_due_to_misconfig`, `network_check_metadata_endpoint_failure_due_to_misconfig`, `network_check_control_plane_failure_due_to_misconfig` and `network_check_multiple_components_failure_due_to_misconfig` enum values for `databricks.sdk.service.sql.TerminationReasonCode`.
+* [Breaking] Change `display_name`, `evaluation`, `query_text`, `schedule` and `warehouse_id` fields for `databricks.sdk.service.sql.AlertV2` to be required.
+* Change `display_name`, `evaluation`, `query_text`, `schedule` and `warehouse_id` fields for `databricks.sdk.service.sql.AlertV2` to be required.
+* Change `comparison_operator` and `source` fields for `databricks.sdk.service.sql.AlertV2Evaluation` to be required.
+* [Breaking] Change `comparison_operator` and `source` fields for `databricks.sdk.service.sql.AlertV2Evaluation` to be required.
+* Change `name` field for `databricks.sdk.service.sql.AlertV2OperandColumn` to be required.
+* [Breaking] Change `name` field for `databricks.sdk.service.sql.AlertV2OperandColumn` to be required.
+* [Breaking] Change `quartz_cron_schedule` and `timezone_id` fields for `databricks.sdk.service.sql.CronSchedule` to be required.
+* Change `quartz_cron_schedule` and `timezone_id` fields for `databricks.sdk.service.sql.CronSchedule` to be required.
+* [Breaking] Remove `update()` method for [w.recipient_federation_policies](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/sharing/recipient_federation_policies.html) workspace-level service.
+* [Breaking] Remove `results` field for `databricks.sdk.service.sql.ListAlertsV2Response`.
\ No newline at end of file
diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py
index e0036200a..182f8cf4e 100755
--- a/databricks/sdk/service/catalog.py
+++ b/databricks/sdk/service/catalog.py
@@ -1740,7 +1740,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo:
 
 
 class ConnectionType(Enum):
-    """Next Id: 38"""
+    """Next Id: 46"""
 
     BIGQUERY = "BIGQUERY"
     DATABRICKS = "DATABRICKS"
@@ -2542,12 +2542,13 @@ class CredentialPurpose(Enum):
 
 
 class CredentialType(Enum):
-    """Next Id: 13"""
+    """Next Id: 14"""
 
     ANY_STATIC_CREDENTIAL = "ANY_STATIC_CREDENTIAL"
     BEARER_TOKEN = "BEARER_TOKEN"
     OAUTH_ACCESS_TOKEN = "OAUTH_ACCESS_TOKEN"
     OAUTH_M2M = "OAUTH_M2M"
+    OAUTH_MTLS = "OAUTH_MTLS"
     OAUTH_REFRESH_TOKEN = "OAUTH_REFRESH_TOKEN"
     OAUTH_RESOURCE_OWNER_PASSWORD = "OAUTH_RESOURCE_OWNER_PASSWORD"
     OAUTH_U2M = "OAUTH_U2M"
@@ -8549,7 +8550,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RowFilterOptions:
 
 @dataclass
 class SchemaInfo:
-    """Next ID: 40"""
+    """Next ID: 42"""
 
     browse_only: Optional[bool] = None
     """Indicates whether the principal is limited to retrieving metadata for the associated object
@@ -8762,7 +8763,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable:
 
 
 class SecurableKind(Enum):
-    """Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266"""
+    """Latest kind: CONNECTION_SALESFORCE_OAUTH_MTLS = 268; Next id:269"""
 
     TABLE_DB_STORAGE = "TABLE_DB_STORAGE"
     TABLE_DELTA = "TABLE_DELTA"
diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py
index 77feca5fe..c6f9bd35d 100755
--- a/databricks/sdk/service/compute.py
+++ b/databricks/sdk/service/compute.py
@@ -7155,11 +7155,21 @@ class TerminationReasonCode(Enum):
     NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT"
     NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT"
     NETWORK_CHECK_CONTROL_PLANE_FAILURE = "NETWORK_CHECK_CONTROL_PLANE_FAILURE"
+    NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG"
     NETWORK_CHECK_DNS_SERVER_FAILURE = "NETWORK_CHECK_DNS_SERVER_FAILURE"
+    NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG"
     NETWORK_CHECK_METADATA_ENDPOINT_FAILURE = "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE"
+
NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG = ( + "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG" + ) NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE = "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG = ( + "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG" + ) NETWORK_CHECK_NIC_FAILURE = "NETWORK_CHECK_NIC_FAILURE" + NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG" NETWORK_CHECK_STORAGE_FAILURE = "NETWORK_CHECK_STORAGE_FAILURE" + NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" diff --git a/databricks/sdk/service/dataquality.py b/databricks/sdk/service/dataquality.py index a5a2f8710..fa99857fb 100755 --- a/databricks/sdk/service/dataquality.py +++ b/databricks/sdk/service/dataquality.py @@ -163,7 +163,7 @@ class DataProfilingConfig: """The warehouse for dashboard creation""" inference_log: Optional[InferenceLogConfig] = None - """Configuration for monitoring inference log tables.""" + """`Analysis Configuration` for monitoring inference log tables.""" latest_monitor_failure_message: Optional[str] = None """The latest error message for a monitor failure.""" @@ -196,13 +196,13 @@ class DataProfilingConfig: high-cardinality columns, only the top 100 unique values by frequency will generate slices.""" snapshot: Optional[SnapshotConfig] = None - """Configuration for monitoring snapshot tables.""" + """`Analysis Configuration` for monitoring snapshot tables.""" status: Optional[DataProfilingStatus] = None """The data profiling monitor status.""" time_series: Optional[TimeSeriesConfig] = None - """Configuration for monitoring time series tables.""" + """`Analysis Configuration` for monitoring time series tables.""" warehouse_id: Optional[str] = None """Optional argument to specify the warehouse for dashboard creation. If not specified, the first @@ -556,13 +556,24 @@ class Monitor: """The type of the monitored object. Can be one of the following: `schema` or `table`.""" object_id: str - """The UUID of the request object. For example, schema id.""" + """The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id""" anomaly_detection_config: Optional[AnomalyDetectionConfig] = None """Anomaly Detection Configuration, applicable to `schema` object types.""" data_profiling_config: Optional[DataProfilingConfig] = None - """Data Profiling Configuration, applicable to `table` object types""" + """Data Profiling Configuration, applicable to `table` object types. 
Exactly one `Analysis + Configuration` must be present.""" def as_dict(self) -> dict: """Serializes the Monitor into a dictionary suitable for use as a JSON request body.""" @@ -664,7 +675,17 @@ class Refresh: """The type of the monitored object. Can be one of the following: `schema`or `table`.""" object_id: str - """The UUID of the request object. For example, table id.""" + """The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id""" end_time_ms: Optional[int] = None """Time when the refresh ended (milliseconds since 1/1/1970 UTC).""" @@ -826,12 +847,28 @@ def __init__(self, api_client): self._api = api_client def cancel_refresh(self, object_type: str, object_id: str, refresh_id: int) -> CancelRefreshResponse: - """Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. + """Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. The + call must be made in the same workspace as where the monitor was created. + + The caller must have either of the following sets of permissions: 1. **MANAGE** and **USE_CATALOG** on + the table's parent catalog. 2. **USE_CATALOG** on the table's parent catalog, and **MANAGE** and + **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the table's parent catalog, + **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. @@ -854,14 +891,19 @@ def create_monitor(self, monitor: Monitor) -> Monitor: """Create a data quality monitor on a Unity Catalog object. The caller must provide either `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog, - have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the table 2. have - **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent schema, and have - **SELECT** access on the table. 3. 
have the following permissions: - **USE_CATALOG** on the table's - parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog, **USE_SCHEMA** on the table's parent + schema, and **SELECT** on the table 2. **USE_CATALOG** on the table's parent catalog, **MANAGE** and + **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** and **SELECT** on + the table. Workspace assets, such as the dashboard, will be created in the workspace where this call was made. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema. + :param monitor: :class:`Monitor` The monitor to create. @@ -877,17 +919,28 @@ def create_monitor(self, monitor: Monitor) -> Monitor: return Monitor.from_dict(res) def create_refresh(self, object_type: str, object_id: str, refresh: Refresh) -> Refresh: - """Creates a refresh. Currently only supported for the `table` `object_type`. + """Creates a refresh. Currently only supported for the `table` `object_type`. The call must be made in + the same workspace as where the monitor was created. - The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the - table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an - owner of the table + The caller must have either of the following sets of permissions: 1. **MANAGE** and **USE_CATALOG** on + the table's parent catalog. 2. **USE_CATALOG** on the table's parent catalog, and **MANAGE** and + **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the table's parent catalog, + **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. :param object_type: str The type of the monitored object. Can be one of the following: `schema`or `table`. :param object_id: str - The UUID of the request object. For example, table id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh: :class:`Refresh` The refresh to create @@ -907,18 +960,32 @@ def create_refresh(self, object_type: str, object_id: str, refresh: Refresh) -> def delete_monitor(self, object_type: str, object_id: str): """Delete a data quality monitor on Unity Catalog object. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. 
-        have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3.
-        have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on
-        the table's parent schema - be an owner of the table.
+        For the `table` `object_type`, the caller must have either of the following sets of permissions: 1.
+        **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent
+        catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the
+        table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table.
 
         Note that the metric tables and dashboard will not be deleted as part of this call; those assets must
         be manually cleaned up (if desired).
 
+        For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1.
+        **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's
+        parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema.
+
         :param object_type: str
           The type of the monitored object. Can be one of the following: `schema` or `table`.
         :param object_id: str
-          The UUID of the request object. For example, schema id.
+          The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`.
+
+          Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog
+          Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field.
+
+          Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog
+          Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field.
+
+          [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/
+          [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id
+          [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id
 
 
         """
@@ -935,7 +1002,17 @@ def delete_refresh(self, object_type: str, object_id: str, refresh_id: int):
         :param object_type: str
           The type of the monitored object. Can be one of the following: `schema` or `table`.
         :param object_id: str
-          The UUID of the request object. For example, schema id.
+          The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`.
+
+          Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog
+          Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field.
+
+          Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog
+          Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field.
+
+          [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/
+          [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id
+          [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id
         :param refresh_id: int
           Unique id of the refresh operation.
 
@@ -951,21 +1028,35 @@ def delete_refresh(self, object_type: str, object_id: str, refresh_id: int):
         )
 
     def get_monitor(self, object_type: str, object_id: str) -> Monitor:
-        """Read a data quality monitor on Unity Catalog object.
+        """Read a data quality monitor on a Unity Catalog object.
+
+        For the `table` `object_type`, the caller must have either of the following sets of permissions: 1.
+        **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. 
**USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema. 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. - The returned information includes configuration values, as well as information on assets created by - the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different - workspace than where the monitor was created. + The returned information includes configuration values on the entity and parent entity as well as + information on assets created by the monitor. Some information (e.g. dashboard) may be filtered out if + the caller is in a different workspace than where the monitor was created. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :returns: :class:`Monitor` """ @@ -978,17 +1069,32 @@ def get_monitor(self, object_type: str, object_id: str) -> Monitor: return Monitor.from_dict(res) def get_refresh(self, object_type: str, object_id: str, refresh_id: int) -> Refresh: - """Get data quality monitor refresh. + """Get data quality monitor refresh. The call must be made in the same workspace as where the monitor was + created. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. 
+ + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. @@ -1034,17 +1140,32 @@ def list_monitor(self, *, page_size: Optional[int] = None, page_token: Optional[ def list_refresh( self, object_type: str, object_id: str, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[Refresh]: - """List data quality monitor refreshes. + """List data quality monitor refreshes. The call must be made in the same workspace as where the monitor + was created. + + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. 
+ + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param page_size: int (optional) :param page_token: str (optional) @@ -1077,15 +1198,29 @@ def list_refresh( def update_monitor(self, object_type: str, object_id: str, monitor: Monitor, update_mask: str) -> Monitor: """Update a data quality monitor on Unity Catalog object. - For the `table` `object_type`, The caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - be an owner of the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. + + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param monitor: :class:`Monitor` The monitor to update. :param update_mask: str @@ -1116,7 +1251,17 @@ def update_refresh( :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. 
:param refresh: :class:`Refresh` diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 8aa530264..9f8766784 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -561,6 +561,9 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomTaskRunState: @dataclass class CleanRoomsNotebookTask: + """Clean Rooms notebook task for V1 Clean Room service (GA). Replaces the deprecated + CleanRoomNotebookTask (defined above) which was for V0 service.""" + clean_room_name: str """The clean room that the notebook belongs to.""" @@ -5016,7 +5019,11 @@ class RunJobTask: dbt_commands: Optional[List[str]] = None """An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt - deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`""" + deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" jar_params: Optional[List[str]] = None """A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", @@ -5025,9 +5032,9 @@ class RunJobTask: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" job_parameters: Optional[Dict[str, str]] = None """Job-level parameters used to trigger the job.""" @@ -5041,13 +5048,13 @@ class RunJobTask: notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables - [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" + [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" pipeline_params: Optional[PipelineParams] = None """Controls whether the pipeline should perform a full refresh""" @@ -5060,7 +5067,7 @@ class RunJobTask: `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -5068,7 +5075,7 @@ class RunJobTask: returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. 
- [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" spark_submit_params: Optional[List[str]] = None """A list of parameters for jobs with spark submit task, for example `"spark_submit_params": @@ -5077,7 +5084,7 @@ class RunJobTask: parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -5085,11 +5092,15 @@ class RunJobTask: returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" sql_params: Optional[Dict[str, str]] = None """A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john - doe", "age": "35"}`. The SQL alert task does not support custom parameters.""" + doe", "age": "35"}`. The SQL alert task does not support custom parameters. + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" def as_dict(self) -> dict: """Serializes the RunJobTask into a dictionary suitable for use as a JSON request body.""" @@ -5377,7 +5388,11 @@ def from_dict(cls, d: Dict[str, Any]) -> RunOutput: class RunParameters: dbt_commands: Optional[List[str]] = None """An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt - deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]`""" + deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" jar_params: Optional[List[str]] = None """A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", @@ -5386,9 +5401,9 @@ class RunParameters: be specified in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" notebook_params: Optional[Dict[str, str]] = None """A map from keys to values for jobs with notebook task, for example `"notebook_params": {"name": @@ -5399,13 +5414,13 @@ class RunParameters: notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. 
- [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables - [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" + [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" pipeline_params: Optional[PipelineParams] = None """Controls whether the pipeline should perform a full refresh""" @@ -5418,7 +5433,7 @@ class RunParameters: `run-now`, it would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -5426,7 +5441,7 @@ class RunParameters: returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" spark_submit_params: Optional[List[str]] = None """A list of parameters for jobs with spark submit task, for example `"spark_submit_params": @@ -5435,7 +5450,7 @@ class RunParameters: parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -5443,11 +5458,15 @@ class RunParameters: returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables""" + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" sql_params: Optional[Dict[str, str]] = None """A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john - doe", "age": "35"}`. The SQL alert task does not support custom parameters.""" + doe", "age": "35"}`. The SQL alert task does not support custom parameters. + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown""" def as_dict(self) -> dict: """Serializes the RunParameters into a dictionary suitable for use as a JSON request body.""" @@ -8981,6 +9000,10 @@ def repair_run( :param dbt_commands: List[str] (optional) An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param jar_params: List[str] (optional) A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the main function of the main class specified in the Spark JAR @@ -8988,9 +9011,9 @@ def repair_run( in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. 
- Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param latest_repair_id: int (optional) @@ -9005,13 +9028,13 @@ def repair_run( notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. The performance target determines the level of compute performance or cost-efficiency for the run. This field overrides the performance target defined on @@ -9029,7 +9052,7 @@ def repair_run( would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -9037,7 +9060,7 @@ def repair_run( returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param rerun_all_failed_tasks: bool (optional) If true, repair all failed tasks. Only one of `rerun_tasks` or `rerun_all_failed_tasks` can be used. :param rerun_dependent_tasks: bool (optional) @@ -9052,7 +9075,7 @@ def repair_run( in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -9060,11 +9083,15 @@ def repair_run( returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown + :returns: Long-running operation waiter for :class:`Run`. 
See :method:wait_get_run_job_terminated_or_skipped for more details. @@ -9200,6 +9227,10 @@ def run_now( :param dbt_commands: List[str] (optional) An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param idempotency_token: str (optional) An optional token to guarantee the idempotency of job run requests. If a run with the provided token already exists, the request does not create a new run but returns the ID of the existing run @@ -9220,9 +9251,9 @@ def run_now( in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param notebook_params: Dict[str,str] (optional) @@ -9234,13 +9265,13 @@ def run_now( notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param only: List[str] (optional) A list of task keys to run inside of the job. If this field is not provided, all tasks in the job will be run. @@ -9261,7 +9292,7 @@ def run_now( would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -9269,7 +9300,7 @@ def run_now( returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param queue: :class:`QueueSettings` (optional) The queue settings of the run. :param spark_submit_params: List[str] (optional) @@ -9279,7 +9310,7 @@ def run_now( in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -9287,11 +9318,15 @@ def run_now( returns an error. 
Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 1e0d689d8..b42f008f9 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -1299,11 +1299,16 @@ class Feature: description: Optional[str] = None """The description of the feature.""" + filter_condition: Optional[str] = None + """The filter condition applied to the source data before aggregation.""" + def as_dict(self) -> dict: """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" body = {} if self.description is not None: body["description"] = self.description + if self.filter_condition is not None: + body["filter_condition"] = self.filter_condition if self.full_name is not None: body["full_name"] = self.full_name if self.function: @@ -1321,6 +1326,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.description is not None: body["description"] = self.description + if self.filter_condition is not None: + body["filter_condition"] = self.filter_condition if self.full_name is not None: body["full_name"] = self.full_name if self.function: @@ -1338,6 +1345,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Feature: """Deserializes the Feature from a dictionary.""" return cls( description=d.get("description", None), + filter_condition=d.get("filter_condition", None), full_name=d.get("full_name", None), function=_from_dict(d, "function", Function), inputs=d.get("inputs", None), @@ -2419,6 +2427,41 @@ def from_dict(cls, d: Dict[str, Any]) -> ListFeaturesResponse: return cls(features=_repeated_dict(d, "features", Feature), next_page_token=d.get("next_page_token", None)) +@dataclass +class ListMaterializedFeaturesResponse: + materialized_features: Optional[List[MaterializedFeature]] = None + """List of materialized features.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of results for this query.""" + + def as_dict(self) -> dict: + """Serializes the ListMaterializedFeaturesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.materialized_features: + body["materialized_features"] = [v.as_dict() for v in self.materialized_features] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListMaterializedFeaturesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.materialized_features: + body["materialized_features"] = self.materialized_features + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListMaterializedFeaturesResponse: + """Deserializes the 
ListMaterializedFeaturesResponse from a dictionary.""" + return cls( + materialized_features=_repeated_dict(d, "materialized_features", MaterializedFeature), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ListModelsResponse: next_page_token: Optional[str] = None @@ -2937,6 +2980,90 @@ def from_dict(cls, d: Dict[str, Any]) -> LoggedModelTag: return cls(key=d.get("key", None), value=d.get("value", None)) +@dataclass +class MaterializedFeature: + """A materialized feature represents a feature that is continuously computed and stored.""" + + feature_name: str + """The full name of the feature in Unity Catalog.""" + + offline_store_config: OfflineStoreConfig + + online_store_config: OnlineStore + + last_materialization_time: Optional[str] = None + """The timestamp when the pipeline last ran and updated the materialized feature values. If the + pipeline has not run yet, this field will be null.""" + + materialized_feature_id: Optional[str] = None + """Unique identifier for the materialized feature.""" + + pipeline_schedule_state: Optional[MaterializedFeaturePipelineScheduleState] = None + """The schedule state of the materialization pipeline.""" + + table_name: Optional[str] = None + """The fully qualified Unity Catalog path to the table containing the materialized feature (Delta + table or Lakebase table). Output only.""" + + def as_dict(self) -> dict: + """Serializes the MaterializedFeature into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.feature_name is not None: + body["feature_name"] = self.feature_name + if self.last_materialization_time is not None: + body["last_materialization_time"] = self.last_materialization_time + if self.materialized_feature_id is not None: + body["materialized_feature_id"] = self.materialized_feature_id + if self.offline_store_config: + body["offline_store_config"] = self.offline_store_config.as_dict() + if self.online_store_config: + body["online_store_config"] = self.online_store_config.as_dict() + if self.pipeline_schedule_state is not None: + body["pipeline_schedule_state"] = self.pipeline_schedule_state.value + if self.table_name is not None: + body["table_name"] = self.table_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MaterializedFeature into a shallow dictionary of its immediate attributes.""" + body = {} + if self.feature_name is not None: + body["feature_name"] = self.feature_name + if self.last_materialization_time is not None: + body["last_materialization_time"] = self.last_materialization_time + if self.materialized_feature_id is not None: + body["materialized_feature_id"] = self.materialized_feature_id + if self.offline_store_config: + body["offline_store_config"] = self.offline_store_config + if self.online_store_config: + body["online_store_config"] = self.online_store_config + if self.pipeline_schedule_state is not None: + body["pipeline_schedule_state"] = self.pipeline_schedule_state + if self.table_name is not None: + body["table_name"] = self.table_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MaterializedFeature: + """Deserializes the MaterializedFeature from a dictionary.""" + return cls( + feature_name=d.get("feature_name", None), + last_materialization_time=d.get("last_materialization_time", None), + materialized_feature_id=d.get("materialized_feature_id", None), + offline_store_config=_from_dict(d, "offline_store_config", OfflineStoreConfig), + online_store_config=_from_dict(d, "online_store_config", OnlineStore), 
+ pipeline_schedule_state=_enum(d, "pipeline_schedule_state", MaterializedFeaturePipelineScheduleState), + table_name=d.get("table_name", None), + ) + + +class MaterializedFeaturePipelineScheduleState(Enum): + + ACTIVE = "ACTIVE" + PAUSED = "PAUSED" + SNAPSHOT = "SNAPSHOT" + + @dataclass class Metric: """Metric associated with a run, represented as a key-value pair.""" @@ -3613,6 +3740,52 @@ def from_dict(cls, d: Dict[str, Any]) -> ModelVersionTag: return cls(key=d.get("key", None), value=d.get("value", None)) +@dataclass +class OfflineStoreConfig: + """Configuration for offline store destination.""" + + catalog_name: str + """The Unity Catalog catalog name.""" + + schema_name: str + """The Unity Catalog schema name.""" + + table_name_prefix: str + """Prefix for Unity Catalog table name. The materialized feature will be stored in a table with + this prefix and a generated postfix.""" + + def as_dict(self) -> dict: + """Serializes the OfflineStoreConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + def as_shallow_dict(self) -> dict: + """Serializes the OfflineStoreConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> OfflineStoreConfig: + """Deserializes the OfflineStoreConfig from a dictionary.""" + return cls( + catalog_name=d.get("catalog_name", None), + schema_name=d.get("schema_name", None), + table_name_prefix=d.get("table_name_prefix", None), + ) + + @dataclass class OnlineStore: """An OnlineStore is a logical database instance that stores and serves features online.""" @@ -6563,6 +6736,23 @@ def create_feature(self, feature: Feature) -> Feature: res = self._api.do("POST", "/api/2.0/feature-engineering/features", body=body, headers=headers) return Feature.from_dict(res) + def create_materialized_feature(self, materialized_feature: MaterializedFeature) -> MaterializedFeature: + """Create a materialized feature. + + :param materialized_feature: :class:`MaterializedFeature` + The materialized feature to create. + + :returns: :class:`MaterializedFeature` + """ + body = materialized_feature.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/feature-engineering/materialized-features", body=body, headers=headers) + return MaterializedFeature.from_dict(res) + def delete_feature(self, full_name: str): """Delete a Feature. @@ -6578,6 +6768,23 @@ def delete_feature(self, full_name: str): self._api.do("DELETE", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) + def delete_materialized_feature(self, materialized_feature_id: str): + """Delete a materialized feature. + + :param materialized_feature_id: str + The ID of the materialized feature to delete. 
+ + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do( + "DELETE", f"/api/2.0/feature-engineering/materialized-features/{materialized_feature_id}", headers=headers + ) + def get_feature(self, full_name: str) -> Feature: """Get a Feature. @@ -6594,6 +6801,24 @@ def get_feature(self, full_name: str) -> Feature: res = self._api.do("GET", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) return Feature.from_dict(res) + def get_materialized_feature(self, materialized_feature_id: str) -> MaterializedFeature: + """Get a materialized feature. + + :param materialized_feature_id: str + The ID of the materialized feature. + + :returns: :class:`MaterializedFeature` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", f"/api/2.0/feature-engineering/materialized-features/{materialized_feature_id}", headers=headers + ) + return MaterializedFeature.from_dict(res) + def list_features(self, *, page_size: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[Feature]: """List Features. @@ -6623,6 +6848,45 @@ def list_features(self, *, page_size: Optional[int] = None, page_token: Optional return query["page_token"] = json["next_page_token"] + def list_materialized_features( + self, *, feature_name: Optional[str] = None, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[MaterializedFeature]: + """List materialized features. + + :param feature_name: str (optional) + Filter by feature name. If specified, only materialized features materialized from this feature will + be returned. + :param page_size: int (optional) + The maximum number of results to return. Defaults to 100 if not specified. Cannot be greater than + 1000. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`MaterializedFeature` + """ + + query = {} + if feature_name is not None: + query["feature_name"] = feature_name + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", "/api/2.0/feature-engineering/materialized-features", query=query, headers=headers + ) + if "materialized_features" in json: + for v in json["materialized_features"]: + yield MaterializedFeature.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + def update_feature(self, full_name: str, feature: Feature, update_mask: str) -> Feature: """Update a Feature. @@ -6649,6 +6913,39 @@ def update_feature(self, full_name: str, feature: Feature, update_mask: str) -> ) return Feature.from_dict(res) + def update_materialized_feature( + self, materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str + ) -> MaterializedFeature: + """Update a materialized feature (pause/resume). + + :param materialized_feature_id: str + Unique identifier for the materialized feature. + :param materialized_feature: :class:`MaterializedFeature` + The materialized feature to update. + :param update_mask: str + Provide the materialization feature fields which should be updated. Currently, only the + pipeline_state field can be updated. 
+ + :returns: :class:`MaterializedFeature` + """ + body = materialized_feature.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/feature-engineering/materialized-features/{materialized_feature_id}", + query=query, + body=body, + headers=headers, + ) + return MaterializedFeature.from_dict(res) + class FeatureStoreAPI: """A feature store is a centralized repository that enables data scientists to find and share features. Using diff --git a/databricks/sdk/service/oauth2.py b/databricks/sdk/service/oauth2.py index 32dcaf8f3..28bae5347 100755 --- a/databricks/sdk/service/oauth2.py +++ b/databricks/sdk/service/oauth2.py @@ -863,17 +863,37 @@ def from_dict(cls, d: Dict[str, Any]) -> SecretInfo: @dataclass class TokenAccessPolicy: + absolute_session_lifetime_in_minutes: Optional[int] = None + """Absolute OAuth session TTL in minutes. Effective only when the single-use refresh token feature + is enabled. This is the absolute TTL of all refresh tokens issued in one OAuth session. When a + new refresh token is issued during refresh token rotation, it will inherit the same absolute TTL + as the old refresh token. In other words, this represents the maximum amount of time a user can + stay logged in without re-authenticating.""" + access_token_ttl_in_minutes: Optional[int] = None """access token time to live in minutes""" + enable_single_use_refresh_tokens: Optional[bool] = None + """Whether to enable single-use refresh tokens (refresh token rotation). If this feature is + enabled, upon successfully getting a new access token using a refresh token, Databricks will + issue a new refresh token along with the access token in the response and invalidate the old + refresh token. The client should use the new refresh token to get access tokens in future + requests.""" + refresh_token_ttl_in_minutes: Optional[int] = None - """refresh token time to live in minutes""" + """Refresh token time to live in minutes. When single-use refresh tokens are enabled, this + represents the TTL of an individual refresh token. 
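To illustrate the new token policy fields above, a minimal sketch with placeholder values; the resulting policy is attached to an OAuth app integration the same way as before:

from databricks.sdk.service import oauth2

# Rotate refresh tokens on each use, cap the overall OAuth session at 8 hours,
# and keep access tokens short-lived. All values are illustrative.
policy = oauth2.TokenAccessPolicy(
    access_token_ttl_in_minutes=60,
    refresh_token_ttl_in_minutes=120,
    enable_single_use_refresh_tokens=True,
    absolute_session_lifetime_in_minutes=480,
)
print(policy.as_dict())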
If the refresh token is used before it + expires, a new one is issued with a renewed individual TTL.""" def as_dict(self) -> dict: """Serializes the TokenAccessPolicy into a dictionary suitable for use as a JSON request body.""" body = {} + if self.absolute_session_lifetime_in_minutes is not None: + body["absolute_session_lifetime_in_minutes"] = self.absolute_session_lifetime_in_minutes if self.access_token_ttl_in_minutes is not None: body["access_token_ttl_in_minutes"] = self.access_token_ttl_in_minutes + if self.enable_single_use_refresh_tokens is not None: + body["enable_single_use_refresh_tokens"] = self.enable_single_use_refresh_tokens if self.refresh_token_ttl_in_minutes is not None: body["refresh_token_ttl_in_minutes"] = self.refresh_token_ttl_in_minutes return body @@ -881,8 +901,12 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the TokenAccessPolicy into a shallow dictionary of its immediate attributes.""" body = {} + if self.absolute_session_lifetime_in_minutes is not None: + body["absolute_session_lifetime_in_minutes"] = self.absolute_session_lifetime_in_minutes if self.access_token_ttl_in_minutes is not None: body["access_token_ttl_in_minutes"] = self.access_token_ttl_in_minutes + if self.enable_single_use_refresh_tokens is not None: + body["enable_single_use_refresh_tokens"] = self.enable_single_use_refresh_tokens if self.refresh_token_ttl_in_minutes is not None: body["refresh_token_ttl_in_minutes"] = self.refresh_token_ttl_in_minutes return body @@ -891,7 +915,9 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> TokenAccessPolicy: """Deserializes the TokenAccessPolicy from a dictionary.""" return cls( + absolute_session_lifetime_in_minutes=d.get("absolute_session_lifetime_in_minutes", None), access_token_ttl_in_minutes=d.get("access_token_ttl_in_minutes", None), + enable_single_use_refresh_tokens=d.get("enable_single_use_refresh_tokens", None), refresh_token_ttl_in_minutes=d.get("refresh_token_ttl_in_minutes", None), ) diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 6ea0874e5..084d0126d 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -119,8 +119,8 @@ def from_dict(cls, d: Dict[str, Any]) -> DataPlaneId: class DayOfWeek(Enum): - """Days of week in which the restart is allowed to happen (within a five-hour window starting at - start_hour). If not specified all days of the week will be used.""" + """Days of week in which the window is allowed to happen. If not specified all days of the week + will be used.""" FRIDAY = "FRIDAY" MONDAY = "MONDAY" diff --git a/databricks/sdk/service/provisioning.py b/databricks/sdk/service/provisioning.py index 857f03012..2f173112e 100755 --- a/databricks/sdk/service/provisioning.py +++ b/databricks/sdk/service/provisioning.py @@ -1436,7 +1436,7 @@ class Workspace: azure_workspace_info: Optional[AzureWorkspaceInfo] = None cloud: Optional[str] = None - """The cloud name. This field always has the value `gcp`.""" + """The cloud name. 
This field can have values like `azure`, `gcp`.""" cloud_resource_container: Optional[CloudResourceContainer] = None @@ -2487,6 +2487,7 @@ def create( gke_config: Optional[GkeConfig] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, + network_connectivity_config_id: Optional[str] = None, network_id: Optional[str] = None, pricing_tier: Optional[PricingTier] = None, private_access_settings_id: Optional[str] = None, @@ -2565,6 +2566,10 @@ def create( The ID of the workspace's managed services encryption key configuration object. This is used to help protect and control access to the workspace's notebooks, secrets, Databricks SQL queries, and query history. The provided key configuration object property use_cases must contain MANAGED_SERVICES. + :param network_connectivity_config_id: str (optional) + The object ID of network connectivity config. Once assigned, the workspace serverless compute + resources use the same set of stable IP CIDR blocks and optional private link to access your + resources. :param network_id: str (optional) The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is required. @@ -2613,6 +2618,8 @@ def create( body["location"] = location if managed_services_customer_managed_key_id is not None: body["managed_services_customer_managed_key_id"] = managed_services_customer_managed_key_id + if network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = network_connectivity_config_id if network_id is not None: body["network_id"] = network_id if pricing_tier is not None: @@ -2653,6 +2660,7 @@ def create_and_wait( gke_config: Optional[GkeConfig] = None, location: Optional[str] = None, managed_services_customer_managed_key_id: Optional[str] = None, + network_connectivity_config_id: Optional[str] = None, network_id: Optional[str] = None, pricing_tier: Optional[PricingTier] = None, private_access_settings_id: Optional[str] = None, @@ -2673,6 +2681,7 @@ def create_and_wait( gke_config=gke_config, location=location, managed_services_customer_managed_key_id=managed_services_customer_managed_key_id, + network_connectivity_config_id=network_connectivity_config_id, network_id=network_id, pricing_tier=pricing_tier, private_access_settings_id=private_access_settings_id, diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index a37ace4f5..3bc7a2969 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -4183,6 +4183,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index 172307d67..fe45bd6d8 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -2987,44 +2987,6 @@ def list( return query["page_token"] = json["next_page_token"] - def update( - self, recipient_name: str, name: str, policy: FederationPolicy, *, update_mask: Optional[str] = None - ) -> FederationPolicy: - """Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the - recipient. - - :param recipient_name: str - Name of the recipient. This is the name of the recipient for which the policy is being updated. - :param name: str - Name of the policy. 
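A hedged sketch of supplying the new network_connectivity_config_id when provisioning a workspace; all identifiers and the region are placeholders, and unrelated optional fields are omitted:

from databricks.sdk import AccountClient

a = AccountClient()

workspace = a.workspaces.create_and_wait(
    workspace_name="analytics-prod",                 # placeholder
    aws_region="us-west-2",                          # placeholder
    credentials_id="<credentials-id>",
    storage_configuration_id="<storage-configuration-id>",
    # Serverless compute in this workspace will use the NCC's stable IP CIDR blocks
    # and optional private link when reaching your resources.
    network_connectivity_config_id="<network-connectivity-config-id>",
)
print(workspace.workspace_id)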
This is the name of the current name of the policy. - :param policy: :class:`FederationPolicy` - :param update_mask: str (optional) - The field mask specifies which fields of the policy to update. To specify multiple fields in the - field mask, use comma as the separator (no space). The special value '*' indicates that all fields - should be updated (full replacement). If unspecified, all fields that are set in the policy provided - in the update request will overwrite the corresponding fields in the existing policy. Example value: - 'comment,oidc_policy.audiences'. - - :returns: :class:`FederationPolicy` - """ - body = policy.as_dict() - query = {} - if update_mask is not None: - query["update_mask"] = update_mask - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do( - "PATCH", - f"/api/2.0/data-sharing/recipients/{recipient_name}/federation-policies/{name}", - query=query, - body=body, - headers=headers, - ) - return FederationPolicy.from_dict(res) - class RecipientsAPI: """A recipient is an object you create using :method:recipients/create to represent an organization which you diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 46d34e96b..afbb45083 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -636,6 +636,19 @@ class AlertState(Enum): @dataclass class AlertV2: + display_name: str + """The display name of the alert.""" + + query_text: str + """Text of the query to be run.""" + + warehouse_id: str + """ID of the SQL warehouse attached to the alert.""" + + evaluation: AlertV2Evaluation + + schedule: CronSchedule + create_time: Optional[str] = None """The timestamp indicating when the alert was created.""" @@ -645,15 +658,10 @@ class AlertV2: custom_summary: Optional[str] = None """Custom summary for the alert. support mustache template.""" - display_name: Optional[str] = None - """The display name of the alert.""" - effective_run_as: Optional[AlertV2RunAs] = None """The actual identity that will be used to execute the alert. This is an output-only field that shows the resolved run-as identity after applying permissions and defaults.""" - evaluation: Optional[AlertV2Evaluation] = None - id: Optional[str] = None """UUID identifying the alert.""" @@ -667,9 +675,6 @@ class AlertV2: """The workspace path of the folder containing the alert. Can only be set on create, and cannot be updated.""" - query_text: Optional[str] = None - """Text of the query to be run.""" - run_as: Optional[AlertV2RunAs] = None """Specifies the identity that will be used to run the alert. This field allows you to configure alerts to run as a specific user or service principal. - For user identity: Set `user_name` to @@ -683,14 +688,9 @@ class AlertV2: servicePrincipal/user role. Deprecated: Use `run_as` field instead. 
This field will be removed in a future release.""" - schedule: Optional[CronSchedule] = None - update_time: Optional[str] = None """The timestamp indicating when the alert was updated.""" - warehouse_id: Optional[str] = None - """ID of the SQL warehouse attached to the alert.""" - def as_dict(self) -> dict: """Serializes the AlertV2 into a dictionary suitable for use as a JSON request body.""" body = {} @@ -790,7 +790,10 @@ def from_dict(cls, d: Dict[str, Any]) -> AlertV2: @dataclass class AlertV2Evaluation: - comparison_operator: Optional[ComparisonOperator] = None + source: AlertV2OperandColumn + """Source column from result to use to evaluate alert""" + + comparison_operator: ComparisonOperator """Operator used for comparison in alert evaluation.""" empty_result_state: Optional[AlertEvaluationState] = None @@ -803,9 +806,6 @@ class AlertV2Evaluation: notification: Optional[AlertV2Notification] = None """User or Notification Destination to notify when alert is triggered.""" - source: Optional[AlertV2OperandColumn] = None - """Source column from result to use to evaluate alert""" - state: Optional[AlertEvaluationState] = None """Latest state of alert evaluation.""" @@ -941,12 +941,12 @@ def from_dict(cls, d: Dict[str, Any]) -> AlertV2Operand: @dataclass class AlertV2OperandColumn: + name: str + aggregation: Optional[Aggregation] = None display: Optional[str] = None - name: Optional[str] = None - def as_dict(self) -> dict: """Serializes the AlertV2OperandColumn into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1718,20 +1718,20 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateWarehouseResponse: @dataclass class CronSchedule: - pause_status: Optional[SchedulePauseStatus] = None - """Indicate whether this schedule is paused or not.""" - - quartz_cron_schedule: Optional[str] = None + quartz_cron_schedule: str """A cron expression using quartz syntax that specifies the schedule for this pipeline. Should use the quartz format described here: http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html""" - timezone_id: Optional[str] = None + timezone_id: str """A Java timezone id. The schedule will be resolved using this timezone. This will be combined with the quartz_cron_schedule to determine the schedule. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.""" + pause_status: Optional[SchedulePauseStatus] = None + """Indicate whether this schedule is paused or not.""" + def as_dict(self) -> dict: """Serializes the CronSchedule into a dictionary suitable for use as a JSON request body.""" body = {} @@ -3929,9 +3929,6 @@ class ListAlertsV2Response: next_page_token: Optional[str] = None - results: Optional[List[AlertV2]] = None - """Deprecated. 
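Because display_name, query_text, warehouse_id, evaluation and schedule are now required on AlertV2 (with source/comparison_operator required on AlertV2Evaluation, name on AlertV2OperandColumn, and both CronSchedule fields), a hedged construction sketch with placeholder values; the create entry point and the GREATER_THAN enum value are assumptions not shown in this patch:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import sql

w = WorkspaceClient()

alert = sql.AlertV2(
    display_name="Daily error count",
    query_text="SELECT count(*) AS errors FROM main.logs.errors",  # placeholder query
    warehouse_id="<warehouse-id>",
    evaluation=sql.AlertV2Evaluation(
        source=sql.AlertV2OperandColumn(name="errors"),
        comparison_operator=sql.ComparisonOperator.GREATER_THAN,   # assumed enum value
        # threshold and notification remain optional and are omitted here
    ),
    schedule=sql.CronSchedule(
        quartz_cron_schedule="0 0 8 * * ?",
        timezone_id="UTC",
    ),
)
created = w.alerts_v2.create_alert(alert=alert)  # assumed AlertV2 entry point

Callers that previously read the deprecated results field from ListAlertsV2Response should switch to alerts, which is also the field list_alerts now iterates.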
Use `alerts` instead.""" - def as_dict(self) -> dict: """Serializes the ListAlertsV2Response into a dictionary suitable for use as a JSON request body.""" body = {} @@ -3939,8 +3936,6 @@ def as_dict(self) -> dict: body["alerts"] = [v.as_dict() for v in self.alerts] if self.next_page_token is not None: body["next_page_token"] = self.next_page_token - if self.results: - body["results"] = [v.as_dict() for v in self.results] return body def as_shallow_dict(self) -> dict: @@ -3950,18 +3945,12 @@ def as_shallow_dict(self) -> dict: body["alerts"] = self.alerts if self.next_page_token is not None: body["next_page_token"] = self.next_page_token - if self.results: - body["results"] = self.results return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> ListAlertsV2Response: """Deserializes the ListAlertsV2Response from a dictionary.""" - return cls( - alerts=_repeated_dict(d, "alerts", AlertV2), - next_page_token=d.get("next_page_token", None), - results=_repeated_dict(d, "results", AlertV2), - ) + return cls(alerts=_repeated_dict(d, "alerts", AlertV2), next_page_token=d.get("next_page_token", None)) class ListOrder(Enum): @@ -6378,11 +6367,21 @@ class TerminationReasonCode(Enum): NEPHOS_RESOURCE_MANAGEMENT = "NEPHOS_RESOURCE_MANAGEMENT" NETVISOR_SETUP_TIMEOUT = "NETVISOR_SETUP_TIMEOUT" NETWORK_CHECK_CONTROL_PLANE_FAILURE = "NETWORK_CHECK_CONTROL_PLANE_FAILURE" + NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CHECK_DNS_SERVER_FAILURE = "NETWORK_CHECK_DNS_SERVER_FAILURE" + NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG" NETWORK_CHECK_METADATA_ENDPOINT_FAILURE = "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG = ( + "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG" + ) NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE = "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG = ( + "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG" + ) NETWORK_CHECK_NIC_FAILURE = "NETWORK_CHECK_NIC_FAILURE" + NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG" NETWORK_CHECK_STORAGE_FAILURE = "NETWORK_CHECK_STORAGE_FAILURE" + NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" @@ -7736,8 +7735,8 @@ def list_alerts(self, *, page_size: Optional[int] = None, page_token: Optional[s while True: json = self._api.do("GET", "/api/2.0/alerts", query=query, headers=headers) - if "results" in json: - for v in json["results"]: + if "alerts" in json: + for v in json["alerts"]: yield AlertV2.from_dict(v) if "next_page_token" not in json or not json["next_page_token"]: return diff --git a/docs/account/provisioning/workspaces.rst b/docs/account/provisioning/workspaces.rst index e004e9cc0..dfdda454d 100644 --- a/docs/account/provisioning/workspaces.rst +++ b/docs/account/provisioning/workspaces.rst @@ -11,7 +11,7 @@ These endpoints are available if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - .. 
py:method:: create( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str]]) -> Wait[Workspace] + .. py:method:: create( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_connectivity_config_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str]]) -> Wait[Workspace] Usage: @@ -121,6 +121,10 @@ The ID of the workspace's managed services encryption key configuration object. This is used to help protect and control access to the workspace's notebooks, secrets, Databricks SQL queries, and query history. The provided key configuration object property use_cases must contain MANAGED_SERVICES. + :param network_connectivity_config_id: str (optional) + The object ID of network connectivity config. Once assigned, the workspace serverless compute + resources use the same set of stable IP CIDR blocks and optional private link to access your + resources. :param network_id: str (optional) The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is required. @@ -147,7 +151,7 @@ See :method:wait_get_workspace_running for more details. - .. py:method:: create_and_wait( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace + .. 
py:method:: create_and_wait( [, aws_region: Optional[str], cloud: Optional[str], cloud_resource_container: Optional[CloudResourceContainer], compute_mode: Optional[CustomerFacingComputeMode], credentials_id: Optional[str], custom_tags: Optional[Dict[str, str]], deployment_name: Optional[str], gcp_managed_network_config: Optional[GcpManagedNetworkConfig], gke_config: Optional[GkeConfig], location: Optional[str], managed_services_customer_managed_key_id: Optional[str], network_connectivity_config_id: Optional[str], network_id: Optional[str], pricing_tier: Optional[PricingTier], private_access_settings_id: Optional[str], storage_configuration_id: Optional[str], storage_customer_managed_key_id: Optional[str], workspace_name: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> Workspace .. py:method:: delete(workspace_id: int) -> Workspace diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 428ca2204..a38b2353f 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -279,7 +279,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - Next Id: 38 + Next Id: 46 .. py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -423,7 +423,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CredentialType - Next Id: 13 + Next Id: 14 .. py:attribute:: ANY_STATIC_CREDENTIAL :value: "ANY_STATIC_CREDENTIAL" @@ -437,6 +437,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: OAUTH_M2M :value: "OAUTH_M2M" + .. py:attribute:: OAUTH_MTLS + :value: "OAUTH_MTLS" + .. py:attribute:: OAUTH_REFRESH_TOKEN :value: "OAUTH_REFRESH_TOKEN" @@ -1501,7 +1504,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266 + Latest kind: CONNECTION_SALESFORCE_OAUTH_MTLS = 268; Next id:269 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 1af334270..23312c5d4 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1390,21 +1390,39 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE" + .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE :value: "NETWORK_CHECK_DNS_SERVER_FAILURE" + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + .. py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE :value: "NETWORK_CHECK_NIC_FAILURE" + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG" + .. 
py:attribute:: NETWORK_CHECK_STORAGE_FAILURE :value: "NETWORK_CHECK_STORAGE_FAILURE" + .. py:attribute:: NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CONFIGURATION_FAILURE :value: "NETWORK_CONFIGURATION_FAILURE" diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 5bd7720e8..91c4ce5b3 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -429,6 +429,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListMaterializedFeaturesResponse + :members: + :undoc-members: + .. autoclass:: ListModelsResponse :members: :undoc-members: @@ -506,6 +510,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: MaterializedFeature + :members: + :undoc-members: + +.. py:class:: MaterializedFeaturePipelineScheduleState + + .. py:attribute:: ACTIVE + :value: "ACTIVE" + + .. py:attribute:: PAUSED + :value: "PAUSED" + + .. py:attribute:: SNAPSHOT + :value: "SNAPSHOT" + .. autoclass:: Metric :members: :undoc-members: @@ -557,6 +576,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: OfflineStoreConfig + :members: + :undoc-members: + .. autoclass:: OnlineStore :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index b49b6bd1c..aaf5b4b23 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -18,7 +18,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: DayOfWeek - Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour). If not specified all days of the week will be used. + Days of week in which the window is allowed to happen. If not specified all days of the week will be used. .. py:attribute:: FRIDAY :value: "FRIDAY" diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index cd77d7d2c..0f4a7b3d3 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -645,6 +645,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccPrivateEndpointRulePrivateLinkConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 17ebe326f..98cbd832f 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1475,21 +1475,39 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE" + .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE :value: "NETWORK_CHECK_DNS_SERVER_FAILURE" + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + .. 
py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE :value: "NETWORK_CHECK_NIC_FAILURE" + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CHECK_STORAGE_FAILURE :value: "NETWORK_CHECK_STORAGE_FAILURE" + .. py:attribute:: NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG + :value: "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" + .. py:attribute:: NETWORK_CONFIGURATION_FAILURE :value: "NETWORK_CONFIGURATION_FAILURE" diff --git a/docs/workspace/dataquality/data_quality.rst b/docs/workspace/dataquality/data_quality.rst index 5a64d4dcd..b40ceac0a 100644 --- a/docs/workspace/dataquality/data_quality.rst +++ b/docs/workspace/dataquality/data_quality.rst @@ -8,12 +8,28 @@ .. py:method:: cancel_refresh(object_type: str, object_id: str, refresh_id: int) -> CancelRefreshResponse - Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. + Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`. The + call must be made in the same workspace as where the monitor was created. + + The caller must have either of the following sets of permissions: 1. **MANAGE** and **USE_CATALOG** on + the table's parent catalog. 2. **USE_CATALOG** on the table's parent catalog, and **MANAGE** and + **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the table's parent catalog, + **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. @@ -25,14 +41,19 @@ Create a data quality monitor on a Unity Catalog object. The caller must provide either `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog, - have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the table 2. have - **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent schema, and have - **SELECT** access on the table. 3. 
have the following permissions: - **USE_CATALOG** on the table's - parent catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog, **USE_SCHEMA** on the table's parent + schema, and **SELECT** on the table 2. **USE_CATALOG** on the table's parent catalog, **MANAGE** and + **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** and **SELECT** on + the table. Workspace assets, such as the dashboard, will be created in the workspace where this call was made. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema. + :param monitor: :class:`Monitor` The monitor to create. @@ -41,17 +62,28 @@ .. py:method:: create_refresh(object_type: str, object_id: str, refresh: Refresh) -> Refresh - Creates a refresh. Currently only supported for the `table` `object_type`. + Creates a refresh. Currently only supported for the `table` `object_type`. The call must be made in + the same workspace as where the monitor was created. - The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the - table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: - - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an - owner of the table + The caller must have either of the following sets of permissions: 1. **MANAGE** and **USE_CATALOG** on + the table's parent catalog. 2. **USE_CATALOG** on the table's parent catalog, and **MANAGE** and + **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the table's parent catalog, + **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. :param object_type: str The type of the monitored object. Can be one of the following: `schema`or `table`. :param object_id: str - The UUID of the request object. For example, table id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh: :class:`Refresh` The refresh to create @@ -62,18 +94,32 @@ Delete a data quality monitor on Unity Catalog object. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - be an owner of the table. 
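To make the object_type/object_id convention concrete, a small sketch assuming the service is exposed as w.data_quality; the UUID is a placeholder table_id taken from the Tables resource or Catalog Explorer:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

table_id = "00000000-0000-0000-0000-000000000000"  # placeholder table_id

monitor = w.data_quality.get_monitor(object_type="table", object_id=table_id)
print(monitor)

for refresh in w.data_quality.list_refresh(object_type="table", object_id=table_id):
    print(refresh)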
+ For the `table` `object_type`, the caller must have either of the following sets of permissions: + **MANAGE** and **USE_CATALOG** on the table's parent catalog. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. Note that the metric tables and dashboard will not be deleted as part of this call; those assets must be manually cleaned up (if desired). + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema. + :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id @@ -85,7 +131,17 @@ :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. @@ -94,38 +150,67 @@ .. py:method:: get_monitor(object_type: str, object_id: str) -> Monitor - Read a data quality monitor on Unity Catalog object. + Read a data quality monitor on a Unity Catalog object. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema. 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. 
**USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. - The returned information includes configuration values, as well as information on assets created by - the monitor. Some information (e.g., dashboard) may be filtered out if the caller is in a different - workspace than where the monitor was created. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. + + The returned information includes configuration values on the entity and parent entity as well as + information on assets created by the monitor. Some information (e.g. dashboard) may be filtered out if + the caller is in a different workspace than where the monitor was created. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :returns: :class:`Monitor` .. py:method:: get_refresh(object_type: str, object_id: str, refresh_id: int) -> Refresh - Get data quality monitor refresh. + Get data quality monitor refresh. The call must be made in the same workspace as where the monitor was + created. + + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. 
In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. @@ -144,17 +229,32 @@ .. py:method:: list_refresh(object_type: str, object_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Refresh] - List data quality monitor refreshes. + List data quality monitor refreshes. The call must be made in the same workspace as where the monitor + was created. + + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. **USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **SELECT** on the table. - For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - **SELECT** privilege on the table. + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param page_size: int (optional) :param page_token: str (optional) @@ -165,15 +265,29 @@ Update a data quality monitor on Unity Catalog object. - For the `table` `object_type`, The caller must either: 1. be an owner of the table's parent catalog 2. - have **USE_CATALOG** on the table's parent catalog and be an owner of the table's parent schema 3. - have the following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on - the table's parent schema - be an owner of the table. + For the `table` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the table's parent catalog. 2. 
**USE_CATALOG** on the table's parent + catalog, and **MANAGE** and **USE_SCHEMA** on the table's parent schema. 3. **USE_CATALOG** on the + table's parent catalog, **USE_SCHEMA** on the table's parent schema, and **MANAGE** on the table. + + For the `schema` `object_type`, the caller must have either of the following sets of permissions: 1. + **MANAGE** and **USE_CATALOG** on the schema's parent catalog. 2. **USE_CATALOG** on the schema's + parent catalog, and **MANAGE** and **USE_SCHEMA** on the schema. :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param monitor: :class:`Monitor` The monitor to update. :param update_mask: str @@ -190,7 +304,17 @@ :param object_type: str The type of the monitored object. Can be one of the following: `schema` or `table`. :param object_id: str - The UUID of the request object. For example, schema id. + The UUID of the request object. It is `schema_id` for `schema`, and `table_id` for `table`. + + Find the `schema_id` from either: 1. The [schema_id] of the `Schemas` resource. 2. In [Catalog + Explorer] > select the `schema` > go to the `Details` tab > the `Schema ID` field. + + Find the `table_id` from either: 1. The [table_id] of the `Tables` resource. 2. In [Catalog + Explorer] > select the `table` > go to the `Details` tab > the `Table ID` field. + + [Catalog Explorer]: https://docs.databricks.com/aws/en/catalog-explorer/ + [schema_id]: https://docs.databricks.com/api/workspace/schemas/get#schema_id + [table_id]: https://docs.databricks.com/api/workspace/tables/get#table_id :param refresh_id: int Unique id of the refresh operation. :param refresh: :class:`Refresh` diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index e1d8f668f..39beecc1b 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -706,6 +706,10 @@ :param dbt_commands: List[str] (optional) An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param jar_params: List[str] (optional) A list of parameters for jobs with Spark JAR tasks, for example `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the main function of the main class specified in the Spark JAR @@ -713,9 +717,9 @@ in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. 
+ ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param latest_repair_id: int (optional) @@ -730,13 +734,13 @@ notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param performance_target: :class:`PerformanceTarget` (optional) The performance mode on a serverless job. The performance target determines the level of compute performance or cost-efficiency for the run. This field overrides the performance target defined on @@ -754,7 +758,7 @@ would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -762,7 +766,7 @@ returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param rerun_all_failed_tasks: bool (optional) If true, repair all failed tasks. Only one of `rerun_tasks` or `rerun_all_failed_tasks` can be used. :param rerun_dependent_tasks: bool (optional) @@ -777,7 +781,7 @@ in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -785,11 +789,15 @@ returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. 
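Following the deprecation notes above, a hedged sketch that passes job-level parameters instead of task-type-specific parameters when triggering a run; the job id and parameter names are placeholders:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Preferred: job parameters are pushed down to the tasks that reference them.
run = w.jobs.run_now_and_wait(
    job_id=1234,                                                    # placeholder
    job_parameters={"table": "main.sales.orders", "run_date": "2025-10-23"},
)

# Deprecated style (still accepted): per-task-type parameters such as notebook_params.
# w.jobs.run_now(job_id=1234, notebook_params={"run_date": "2025-10-23"})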
@@ -904,6 +912,10 @@ :param dbt_commands: List[str] (optional) An array of commands to execute for jobs with the dbt task, for example `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt run"]` + + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param idempotency_token: str (optional) An optional token to guarantee the idempotency of job run requests. If a run with the provided token already exists, the request does not create a new run but returns the ID of the existing run @@ -924,9 +936,9 @@ in conjunction with notebook_params. The JSON representation of this field (for example `{"jar_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param job_parameters: Dict[str,str] (optional) Job-level parameters used in the run. for example `"param": "overriding_val"` :param notebook_params: Dict[str,str] (optional) @@ -938,13 +950,13 @@ notebook_params cannot be specified in conjunction with jar_params. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. The JSON representation of this field (for example `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed 10,000 bytes. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param only: List[str] (optional) A list of task keys to run inside of the job. If this field is not provided, all tasks in the job will be run. @@ -965,7 +977,7 @@ would overwrite the parameters specified in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -973,7 +985,7 @@ returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. - [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param queue: :class:`QueueSettings` (optional) The queue settings of the run. :param spark_submit_params: List[str] (optional) @@ -983,7 +995,7 @@ in job setting. The JSON representation of this field (for example `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. - Use [Task parameter variables] to set parameters containing information about job runs + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. Important @@ -991,11 +1003,15 @@ returns an error. Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis. 
- [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown :param sql_params: Dict[str,str] (optional) A map from keys to values for jobs with SQL task, for example `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task does not support custom parameters. + ⚠ **Deprecation note** Use [job parameters] to pass information down to tasks. + + [job parameters]: https://docs.databricks.com/jobs/job-parameters.html#job-parameter-pushdown + :returns: Long-running operation waiter for :class:`Run`. See :method:wait_get_run_job_terminated_or_skipped for more details. diff --git a/docs/workspace/ml/feature_engineering.rst b/docs/workspace/ml/feature_engineering.rst index 8d6988638..0ec7cc8bf 100644 --- a/docs/workspace/ml/feature_engineering.rst +++ b/docs/workspace/ml/feature_engineering.rst @@ -16,6 +16,16 @@ :returns: :class:`Feature` + .. py:method:: create_materialized_feature(materialized_feature: MaterializedFeature) -> MaterializedFeature + + Create a materialized feature. + + :param materialized_feature: :class:`MaterializedFeature` + The materialized feature to create. + + :returns: :class:`MaterializedFeature` + + .. py:method:: delete_feature(full_name: str) Delete a Feature. @@ -26,6 +36,16 @@ + .. py:method:: delete_materialized_feature(materialized_feature_id: str) + + Delete a materialized feature. + + :param materialized_feature_id: str + The ID of the materialized feature to delete. + + + + .. py:method:: get_feature(full_name: str) -> Feature Get a Feature. @@ -36,6 +56,16 @@ :returns: :class:`Feature` + .. py:method:: get_materialized_feature(materialized_feature_id: str) -> MaterializedFeature + + Get a materialized feature. + + :param materialized_feature_id: str + The ID of the materialized feature. + + :returns: :class:`MaterializedFeature` + + .. py:method:: list_features( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[Feature] List Features. @@ -48,6 +78,22 @@ :returns: Iterator over :class:`Feature` + .. py:method:: list_materialized_features( [, feature_name: Optional[str], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[MaterializedFeature] + + List materialized features. + + :param feature_name: str (optional) + Filter by feature name. If specified, only materialized features materialized from this feature will + be returned. + :param page_size: int (optional) + The maximum number of results to return. Defaults to 100 if not specified. Cannot be greater than + 1000. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`MaterializedFeature` + + .. py:method:: update_feature(full_name: str, feature: Feature, update_mask: str) -> Feature Update a Feature. @@ -60,4 +106,19 @@ The list of fields to update. :returns: :class:`Feature` + + + .. py:method:: update_materialized_feature(materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str) -> MaterializedFeature + + Update a materialized feature (pause/resume). + + :param materialized_feature_id: str + Unique identifier for the materialized feature. + :param materialized_feature: :class:`MaterializedFeature` + The materialized feature to update. + :param update_mask: str + Provide the materialization feature fields which should be updated. Currently, only the + pipeline_state field can be updated. 
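Editor's note: for orientation, a hedged sketch of how the new materialized-feature methods documented above might be called from the Python SDK. The feature name and IDs are hypothetical, and the `MaterializedFeature` fields live in `databricks.sdk.service.ml`, which is not reproduced in this excerpt, so `create_materialized_feature()` is omitted here:

```python
# Hedged sketch of the new w.feature_engineering materialized-feature calls.
# Names and IDs below are placeholders, not values from this patch.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Page through materialized features derived from one feature; the returned
# iterator handles page_token-based pagination for the caller.
for mf in w.feature_engineering.list_materialized_features(
    feature_name="main.default.user_features.avg_spend",  # hypothetical
    page_size=100,
):
    print(mf)

# Fetch a single materialized feature by ID, then pause/resume it by updating
# only the pipeline_state field, per the update_mask contract described above.
mf = w.feature_engineering.get_materialized_feature(materialized_feature_id="mf-123")
w.feature_engineering.update_materialized_feature(
    materialized_feature_id="mf-123",
    materialized_feature=mf,  # after setting mf.pipeline_state to the desired state
    update_mask="pipeline_state",
)

# Remove the materialized feature once it is no longer needed.
w.feature_engineering.delete_materialized_feature(materialized_feature_id="mf-123")
```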
+ + :returns: :class:`MaterializedFeature` \ No newline at end of file diff --git a/docs/workspace/sharing/recipient_federation_policies.rst b/docs/workspace/sharing/recipient_federation_policies.rst index 0cdcd8559..770f9b1ca 100644 --- a/docs/workspace/sharing/recipient_federation_policies.rst +++ b/docs/workspace/sharing/recipient_federation_policies.rst @@ -93,24 +93,4 @@ :param page_token: str (optional) :returns: Iterator over :class:`FederationPolicy` - - - .. py:method:: update(recipient_name: str, name: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy - - Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the - recipient. - - :param recipient_name: str - Name of the recipient. This is the name of the recipient for which the policy is being updated. - :param name: str - Name of the policy. This is the name of the current name of the policy. - :param policy: :class:`FederationPolicy` - :param update_mask: str (optional) - The field mask specifies which fields of the policy to update. To specify multiple fields in the - field mask, use comma as the separator (no space). The special value '*' indicates that all fields - should be updated (full replacement). If unspecified, all fields that are set in the policy provided - in the update request will overwrite the corresponding fields in the existing policy. Example value: - 'comment,oidc_policy.audiences'. - - :returns: :class:`FederationPolicy` \ No newline at end of file diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py index 6a4325b53..60d70bf3d 100755 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -335,6 +335,16 @@ def create_test_resource(self, resource: TestResource) -> CreateTestResourceOper operation = Operation.from_dict(res) return CreateTestResourceOperation(self, operation) + def delete_test_resource(self, resource_id: str) -> DeleteTestResourceOperation: + + headers = { + "Accept": "application/json", + } + + res = self._api.do("DELETE", f"/api/2.0/lro-testing/resources/{resource_id}", headers=headers) + operation = Operation.from_dict(res) + return DeleteTestResourceOperation(self, operation) + def get_operation(self, name: str) -> Operation: headers = { @@ -442,3 +452,84 @@ def done(self) -> bool: self._operation = operation return operation.done + + +class DeleteTestResourceOperation: + """Long-running operation for delete_test_resource""" + + def __init__(self, impl: LroTestingAPI, operation: Operation): + self._impl = impl + self._operation = operation + + def wait(self, opts: Optional[lro.LroOptions] = None): + """Wait blocks until the long-running operation is completed with default 20 min + timeout. If the operation didn't finish within the timeout, this function will + raise an error of type TimeoutError, otherwise returns successful response and + any errors encountered. 
+ + :param opts: :class:`LroOptions` + Timeout options (default: 20 minutes) + + :returns: :class:`Any /* MISSING TYPE */` + """ + + def poll_operation(): + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + if not operation.done: + return None, RetryError.continues("operation still in progress") + + if operation.error: + error_msg = operation.error.message if operation.error.message else "unknown error" + if operation.error.error_code: + error_msg = f"[{operation.error.error_code}] {error_msg}" + return None, RetryError.halt(Exception(f"operation failed: {error_msg}")) + + # Operation completed successfully, unmarshal response. + if operation.response is None: + return None, RetryError.halt(Exception("operation completed but no response available")) + + return {}, None + + poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + + def cancel(self): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not guaranteed. + """ + self._impl.cancel_operation(name=self._operation.name) + + def name(self) -> str: + """Name returns the name of the long-running operation. The name is assigned + by the server and is unique within the service from which the operation is created. + + :returns: str + """ + return self._operation.name + + def metadata(self) -> TestResourceOperationMetadata: + """Metadata returns metadata associated with the long-running operation. + If the metadata is not available, the returned metadata is None. + + :returns: :class:`TestResourceOperationMetadata` or None + """ + if self._operation.metadata is None: + return None + + return TestResourceOperationMetadata.from_dict(self._operation.metadata) + + def done(self) -> bool: + """Done reports whether the long-running operation has completed. + + :returns: bool + """ + # Refresh the operation state first + operation = self._impl.get_operation(name=self._operation.name) + + # Update local operation state + self._operation = operation + + return operation.done diff --git a/tests/generated/test_http_call.py b/tests/generated/test_http_call.py index eecb6cf9c..f8e411601 100755 --- a/tests/generated/test_http_call.py +++ b/tests/generated/test_http_call.py @@ -98,7 +98,7 @@ def _fieldmask(d: str) -> FieldMask: ), ( lambda requests_mock: requests_mock.patch( - "http://localhost/api/2.0/http-call/update_string/789/True?optional_complex_query_param.nested_optional_query_param=nested_optional" + "http://localhost/api/2.0/http-call/update_string/789/true?optional_complex_query_param.nested_optional_query_param=nested_optional" ), lambda client: client.update_resource( resource=Resource( @@ -116,7 +116,7 @@ def _fieldmask(d: str) -> FieldMask: ), ( lambda requests_mock: requests_mock.patch( - "http://localhost/api/2.0/http-call/update_string/789/True?repeated_query_param=item1&repeated_query_param=item2&repeated_query_param=item3" + "http://localhost/api/2.0/http-call/update_string/789/true?repeated_query_param=item1&repeated_query_param=item2&repeated_query_param=item3" ), lambda client: client.update_resource( resource=Resource(