diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ad1e20e88..6d4ada17b 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -27cebd58ae24e19c95c675db3a93b6046abaca2a \ No newline at end of file +8f5eedbc991c4f04ce1284406577b0c92d59a224 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 45460b391..c6b4db8a5 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -16,3 +16,24 @@ ### Internal Changes ### API Changes +* Add `create_space()` and `update_space()` methods for [w.genie](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dashboards/genie.html) workspace-level service. +* Add `create_kafka_config()`, `delete_kafka_config()`, `get_kafka_config()`, `list_kafka_configs()` and `update_kafka_config()` methods for [w.feature_engineering](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/ml/feature_engineering.html) workspace-level service. +* Add `delete_online_table()` method for [w.feature_store](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/ml/feature_store.html) workspace-level service. +* Add `retrieve_user_visible_metrics()` method for [w.vector_search_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_endpoints.html) workspace-level service. +* Add `major_version` field for `databricks.sdk.service.billing.CreateBillingUsageDashboardRequest`. +* Add `include_serialized_space` field for `databricks.sdk.service.dashboards.GenieGetSpaceRequest`. +* Add `serialized_space` field for `databricks.sdk.service.dashboards.GenieSpace`. +* Add `purpose` field for `databricks.sdk.service.dashboards.TextAttachment`. +* Add `budget_policy_id` field for `databricks.sdk.service.database.NewPipelineSpec`. +* Add `model` field for `databricks.sdk.service.jobs.TriggerSettings`. +* Add `kafka_source` field for `databricks.sdk.service.ml.DataSource`. +* Add `connection_parameters` field for `databricks.sdk.service.pipelines.IngestionGatewayPipelineDefinition`. +* Add `ingest_from_uc_foreign_catalog` field for `databricks.sdk.service.pipelines.IngestionPipelineDefinition`. +* Add `rewind_spec` field for `databricks.sdk.service.pipelines.StartUpdate`. +* Add `type_text` field for `databricks.sdk.service.vectorsearch.ColumnInfo`. +* Add `foreign_catalog` enum value for `databricks.sdk.service.pipelines.IngestionSourceType`. +* Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState`. +* Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.NccAzurePrivateEndpointRuleConnectionState`. +* [Breaking] Change `destinations` field for `databricks.sdk.service.catalog.AccessRequestDestinations` to no longer be required. +* Change `destinations` field for `databricks.sdk.service.catalog.AccessRequestDestinations` to no longer be required. +* [Breaking] Change `online_store_config` field for `databricks.sdk.service.ml.MaterializedFeature` to type `databricks.sdk.service.ml.OnlineStoreConfig` dataclass. 
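A minimal usage sketch of the new Genie space methods listed above, assuming a configured `WorkspaceClient` (`w`); the space and warehouse IDs are placeholders, and the serialized payload comes from exporting an existing space:

```python
# Hedged sketch of the new Genie space APIs; IDs below are placeholders.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Export an existing space to see the serialized_space structure (requires CAN EDIT).
existing = w.genie.get_space(space_id="existing-space-id", include_serialized_space=True)

# Create a new space from the serialized payload.
created = w.genie.create_space(
    warehouse_id="placeholder-warehouse-id",
    serialized_space=existing.serialized_space,  # full space definition as a JSON string
    title="Sales Genie space (copy)",
    parent_path="/Workspace/Shared/genie",
)

# Update the new space; serialized_space, when provided, is a full replacement.
w.genie.update_space(created.space_id, description="Copied from an existing space")
```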
diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index da0509a95..8067265a3 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -856,7 +856,7 @@ def resource_quotas(self) -> pkg_catalog.ResourceQuotasAPI: @property def rfa(self) -> pkg_catalog.RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity Catalog securables.""" + """Request for Access enables users to request access for Unity Catalog securables.""" return self._rfa @property diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index f22edec9b..c6d6c2dbc 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -1474,7 +1474,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ApplicationStatus: class ComputeSize(Enum): LARGE = "LARGE" - LIQUID = "LIQUID" MEDIUM = "MEDIUM" diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 77ef2f792..ddd02c457 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -1428,6 +1428,12 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateBudgetConfigurationResponse: return cls(budget=_from_dict(d, "budget", BudgetConfiguration)) +class UsageDashboardMajorVersion(Enum): + + USAGE_DASHBOARD_MAJOR_VERSION_1 = "USAGE_DASHBOARD_MAJOR_VERSION_1" + USAGE_DASHBOARD_MAJOR_VERSION_2 = "USAGE_DASHBOARD_MAJOR_VERSION_2" + + class UsageDashboardType(Enum): USAGE_DASHBOARD_TYPE_GLOBAL = "USAGE_DASHBOARD_TYPE_GLOBAL" @@ -2023,13 +2029,19 @@ def __init__(self, api_client): self._api = api_client def create( - self, *, dashboard_type: Optional[UsageDashboardType] = None, workspace_id: Optional[int] = None + self, + *, + dashboard_type: Optional[UsageDashboardType] = None, + major_version: Optional[UsageDashboardMajorVersion] = None, + workspace_id: Optional[int] = None, ) -> CreateBillingUsageDashboardResponse: """Create a usage dashboard specified by workspaceId, accountId, and dashboard type. :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. Global level usage dashboard shows usage data for all workspaces in the account. + :param major_version: :class:`UsageDashboardMajorVersion` (optional) + The major version of the usage dashboard template to use. Defaults to VERSION_1. :param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. @@ -2039,6 +2051,8 @@ def create( body = {} if dashboard_type is not None: body["dashboard_type"] = dashboard_type.value + if major_version is not None: + body["major_version"] = major_version.value if workspace_id is not None: body["workspace_id"] = workspace_id headers = { diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index a99c5405a..1e5276d26 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -23,9 +23,6 @@ @dataclass class AccessRequestDestinations: - destinations: List[NotificationDestination] - """The access request destinations for the securable.""" - securable: Securable """The securable for which the access request destinations are being retrieved.""" @@ -33,6 +30,9 @@ class AccessRequestDestinations: """Indicates whether any destinations are hidden from the caller due to a lack of permissions. 
This value is true if the caller does not have permission to see all destinations.""" + destinations: Optional[List[NotificationDestination]] = None + """The access request destinations for the securable.""" + def as_dict(self) -> dict: """Serializes the AccessRequestDestinations into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1740,7 +1740,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: class ConnectionType(Enum): - """Next Id: 47""" + """Next Id: 48""" BIGQUERY = "BIGQUERY" DATABRICKS = "DATABRICKS" @@ -1750,7 +1750,6 @@ class ConnectionType(Enum): HTTP = "HTTP" MYSQL = "MYSQL" ORACLE = "ORACLE" - PALANTIR = "PALANTIR" POSTGRESQL = "POSTGRESQL" POWER_BI = "POWER_BI" REDSHIFT = "REDSHIFT" @@ -8745,7 +8744,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271""" + """Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" @@ -8787,7 +8786,6 @@ class SecurableKind(Enum): TABLE_FOREIGN_MYSQL = "TABLE_FOREIGN_MYSQL" TABLE_FOREIGN_NETSUITE = "TABLE_FOREIGN_NETSUITE" TABLE_FOREIGN_ORACLE = "TABLE_FOREIGN_ORACLE" - TABLE_FOREIGN_PALANTIR = "TABLE_FOREIGN_PALANTIR" TABLE_FOREIGN_POSTGRESQL = "TABLE_FOREIGN_POSTGRESQL" TABLE_FOREIGN_REDSHIFT = "TABLE_FOREIGN_REDSHIFT" TABLE_FOREIGN_SALESFORCE = "TABLE_FOREIGN_SALESFORCE" @@ -14650,12 +14648,10 @@ def list_quotas( class RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + """Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations.""" + These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations.""" def __init__(self, api_client): self._api = api_client diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 64e6f4b0a..473c7cca1 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -171,7 +171,7 @@ class AwsAttributes: be of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone id if the Databricks deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and - if not specified, a default zone will be used. If the zone specified is "auto", will try to + if not specified, the zone "auto" will be used. If the zone specified is "auto", will try to place cluster in a zone with high availability, and will retry placement in a different AZ if there is not enough capacity. 
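A minimal sketch of the new `major_version` parameter on usage dashboard creation, assuming account-level authentication and that the service is exposed as `usage_dashboards` on `AccountClient`:

```python
# Hedged sketch: create a global usage dashboard on the new major version.
from databricks.sdk import AccountClient
from databricks.sdk.service.billing import (UsageDashboardMajorVersion,
                                            UsageDashboardType)

a = AccountClient()

resp = a.usage_dashboards.create(
    dashboard_type=UsageDashboardType.USAGE_DASHBOARD_TYPE_GLOBAL,
    major_version=UsageDashboardMajorVersion.USAGE_DASHBOARD_MAJOR_VERSION_2,
)
print(resp)
```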
@@ -7112,7 +7112,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -7191,8 +7190,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -7205,7 +7202,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 5bf772f27..12c519dca 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -830,6 +830,12 @@ class GenieSpace: description: Optional[str] = None """Description of the Genie Space""" + serialized_space: Optional[str] = None + """The contents of the Genie Space in serialized string form. This field is excluded in List Genie + spaces responses. Use the [Get Genie Space](:method:genie/getspace) API to retrieve an example + response, which includes the `serialized_space` field. 
This field provides the structure of the + JSON string that represents the space's layout and components.""" + warehouse_id: Optional[str] = None """Warehouse associated with the Genie Space""" @@ -838,6 +844,8 @@ def as_dict(self) -> dict: body = {} if self.description is not None: body["description"] = self.description + if self.serialized_space is not None: + body["serialized_space"] = self.serialized_space if self.space_id is not None: body["space_id"] = self.space_id if self.title is not None: @@ -851,6 +859,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.description is not None: body["description"] = self.description + if self.serialized_space is not None: + body["serialized_space"] = self.serialized_space if self.space_id is not None: body["space_id"] = self.space_id if self.title is not None: @@ -864,6 +874,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieSpace: """Deserializes the GenieSpace from a dictionary.""" return cls( description=d.get("description", None), + serialized_space=d.get("serialized_space", None), space_id=d.get("space_id", None), title=d.get("title", None), warehouse_id=d.get("warehouse_id", None), @@ -1161,7 +1172,6 @@ class MessageErrorType(Enum): INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION = "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION" INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION = "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION = "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" INVALID_CHAT_COMPLETION_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" INVALID_COMPLETION_REQUEST_EXCEPTION = "INVALID_COMPLETION_REQUEST_EXCEPTION" INVALID_FUNCTION_CALL_EXCEPTION = "INVALID_FUNCTION_CALL_EXCEPTION" @@ -1639,6 +1649,9 @@ class TextAttachment: id: Optional[str] = None + purpose: Optional[TextAttachmentPurpose] = None + """Purpose/intent of this text attachment""" + def as_dict(self) -> dict: """Serializes the TextAttachment into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1646,6 +1659,8 @@ def as_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.purpose is not None: + body["purpose"] = self.purpose.value return body def as_shallow_dict(self) -> dict: @@ -1655,12 +1670,22 @@ def as_shallow_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.purpose is not None: + body["purpose"] = self.purpose return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> TextAttachment: """Deserializes the TextAttachment from a dictionary.""" - return cls(content=d.get("content", None), id=d.get("id", None)) + return cls( + content=d.get("content", None), id=d.get("id", None), purpose=_enum(d, "purpose", TextAttachmentPurpose) + ) + + +class TextAttachmentPurpose(Enum): + """Purpose/intent of a text attachment""" + + FOLLOW_UP_QUESTION = "FOLLOW_UP_QUESTION" @dataclass @@ -1787,6 +1812,53 @@ def create_message_and_wait( timeout=timeout ) + def create_space( + self, + warehouse_id: str, + serialized_space: str, + *, + description: Optional[str] = None, + parent_path: Optional[str] = None, + title: Optional[str] = None, + ) -> GenieSpace: + """Creates a Genie space from a serialized payload. + + :param warehouse_id: str + Warehouse to associate with the new space + :param serialized_space: str + The contents of the Genie Space in serialized string form. 
Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. + :param description: str (optional) + Optional description + :param parent_path: str (optional) + Parent folder path where the space will be registered + :param title: str (optional) + Optional title override + + :returns: :class:`GenieSpace` + """ + + body = {} + if description is not None: + body["description"] = description + if parent_path is not None: + body["parent_path"] = parent_path + if serialized_space is not None: + body["serialized_space"] = serialized_space + if title is not None: + body["title"] = title + if warehouse_id is not None: + body["warehouse_id"] = warehouse_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/genie/spaces", body=body, headers=headers) + return GenieSpace.from_dict(res) + def delete_conversation(self, space_id: str, conversation_id: str): """Delete a conversation. @@ -1992,20 +2064,26 @@ def get_message_query_result_by_attachment( ) return GenieGetMessageQueryResultResponse.from_dict(res) - def get_space(self, space_id: str) -> GenieSpace: + def get_space(self, space_id: str, *, include_serialized_space: Optional[bool] = None) -> GenieSpace: """Get details of a Genie Space. :param space_id: str The ID associated with the Genie space + :param include_serialized_space: bool (optional) + Whether to include the serialized space export in the response. Requires at least CAN EDIT + permission on the space. :returns: :class:`GenieSpace` """ + query = {} + if include_serialized_space is not None: + query["include_serialized_space"] = include_serialized_space headers = { "Accept": "application/json", } - res = self._api.do("GET", f"/api/2.0/genie/spaces/{space_id}", headers=headers) + res = self._api.do("GET", f"/api/2.0/genie/spaces/{space_id}", query=query, headers=headers) return GenieSpace.from_dict(res) def list_conversation_messages( @@ -2184,6 +2262,51 @@ def trash_space(self, space_id: str): self._api.do("DELETE", f"/api/2.0/genie/spaces/{space_id}", headers=headers) + def update_space( + self, + space_id: str, + *, + description: Optional[str] = None, + serialized_space: Optional[str] = None, + title: Optional[str] = None, + warehouse_id: Optional[str] = None, + ) -> GenieSpace: + """Updates a Genie space with a serialized payload. + + :param space_id: str + Genie space ID + :param description: str (optional) + Optional description + :param serialized_space: str (optional) + The contents of the Genie Space in serialized string form (full replacement). Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. 
+ :param title: str (optional) + Optional title override + :param warehouse_id: str (optional) + Optional warehouse override + + :returns: :class:`GenieSpace` + """ + + body = {} + if description is not None: + body["description"] = description + if serialized_space is not None: + body["serialized_space"] = serialized_space + if title is not None: + body["title"] = title + if warehouse_id is not None: + body["warehouse_id"] = warehouse_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/genie/spaces/{space_id}", body=body, headers=headers) + return GenieSpace.from_dict(res) + class LakeviewAPI: """These APIs provide specific management operations for Lakeview dashboards. Generic resource management can diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index b0bbbd7cb..21d6ea9c9 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -824,6 +824,9 @@ class NewPipelineSpec: """Custom fields that user can set for pipeline while creating SyncedDatabaseTable. Note that other fields of pipeline are still inferred by table def internally""" + budget_policy_id: Optional[str] = None + """Budget policy to set on the newly created pipeline.""" + storage_catalog: Optional[str] = None """This field needs to be specified if the destination catalog is a managed postgres catalog. @@ -839,6 +842,8 @@ class NewPipelineSpec: def as_dict(self) -> dict: """Serializes the NewPipelineSpec into a dictionary suitable for use as a JSON request body.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.storage_catalog is not None: body["storage_catalog"] = self.storage_catalog if self.storage_schema is not None: @@ -848,6 +853,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the NewPipelineSpec into a shallow dictionary of its immediate attributes.""" body = {} + if self.budget_policy_id is not None: + body["budget_policy_id"] = self.budget_policy_id if self.storage_catalog is not None: body["storage_catalog"] = self.storage_catalog if self.storage_schema is not None: @@ -857,7 +864,11 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> NewPipelineSpec: """Deserializes the NewPipelineSpec from a dictionary.""" - return cls(storage_catalog=d.get("storage_catalog", None), storage_schema=d.get("storage_schema", None)) + return cls( + budget_policy_id=d.get("budget_policy_id", None), + storage_catalog=d.get("storage_catalog", None), + storage_schema=d.get("storage_schema", None), + ) class ProvisioningInfoState(Enum): diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index e84121f29..4c0d13ab6 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2525,8 +2525,9 @@ def list( start_index: Optional[int] = None, ) -> Iterator[AccountGroup]: """Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. 
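A minimal sketch of the new `budget_policy_id` field on `NewPipelineSpec`, used when a synced table's pipeline is created; the policy ID, catalog, and schema below are placeholders:

```python
# Hedged sketch of the new budget_policy_id knob; values are placeholders.
from databricks.sdk.service.database import NewPipelineSpec

spec = NewPipelineSpec(
    budget_policy_id="placeholder-policy-id",  # budget policy applied to the new pipeline
    storage_catalog="main",
    storage_schema="synced_tables",
)
print(spec.as_dict())
# {'budget_policy_id': 'placeholder-policy-id', 'storage_catalog': 'main', 'storage_schema': 'synced_tables'}
```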
diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 1ca8e631c..26007f1b2 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -2915,10 +2915,10 @@ class JobSettings: environments: Optional[List[JobEnvironment]] = None """A list of task execution environment specifications that can be referenced by serverless tasks - of this job. An environment is required to be present for serverless tasks. For serverless - notebook tasks, the environment is accessible in the notebook environment panel. For other - serverless tasks, the task environment is required to be specified using environment_key in the - task settings.""" + of this job. For serverless notebook tasks, if the environment_key is not specified, the + notebook environment will be used if present. If a jobs environment is specified, it will + override the notebook environment. For other serverless tasks, the task environment is required + to be specified using environment_key in the task settings.""" format: Optional[Format] = None """Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. @@ -3460,6 +3460,78 @@ def from_dict(cls, d: Dict[str, Any]) -> ListRunsResponse: ) +@dataclass +class ModelTriggerConfiguration: + condition: ModelTriggerConfigurationCondition + """The condition based on which to trigger a job run.""" + + aliases: Optional[List[str]] = None + """Aliases of the model versions to monitor. Can only be used in conjunction with condition + MODEL_ALIAS_SET.""" + + min_time_between_triggers_seconds: Optional[int] = None + """If set, the trigger starts a run only after the specified amount of time has passed since the + last time the trigger fired. The minimum allowed value is 60 seconds.""" + + securable_name: Optional[str] = None + """Name of the securable to monitor ("mycatalog.myschema.mymodel" in the case of model-level + triggers, "mycatalog.myschema" in the case of schema-level triggers) or empty in the case of + metastore-level triggers.""" + + wait_after_last_change_seconds: Optional[int] = None + """If set, the trigger starts a run only after no model updates have occurred for the specified + time and can be used to wait for a series of model updates before triggering a run. 
The minimum + allowed value is 60 seconds.""" + + def as_dict(self) -> dict: + """Serializes the ModelTriggerConfiguration into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.aliases: + body["aliases"] = [v for v in self.aliases] + if self.condition is not None: + body["condition"] = self.condition.value + if self.min_time_between_triggers_seconds is not None: + body["min_time_between_triggers_seconds"] = self.min_time_between_triggers_seconds + if self.securable_name is not None: + body["securable_name"] = self.securable_name + if self.wait_after_last_change_seconds is not None: + body["wait_after_last_change_seconds"] = self.wait_after_last_change_seconds + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ModelTriggerConfiguration into a shallow dictionary of its immediate attributes.""" + body = {} + if self.aliases: + body["aliases"] = self.aliases + if self.condition is not None: + body["condition"] = self.condition + if self.min_time_between_triggers_seconds is not None: + body["min_time_between_triggers_seconds"] = self.min_time_between_triggers_seconds + if self.securable_name is not None: + body["securable_name"] = self.securable_name + if self.wait_after_last_change_seconds is not None: + body["wait_after_last_change_seconds"] = self.wait_after_last_change_seconds + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ModelTriggerConfiguration: + """Deserializes the ModelTriggerConfiguration from a dictionary.""" + return cls( + aliases=d.get("aliases", None), + condition=_enum(d, "condition", ModelTriggerConfigurationCondition), + min_time_between_triggers_seconds=d.get("min_time_between_triggers_seconds", None), + securable_name=d.get("securable_name", None), + wait_after_last_change_seconds=d.get("wait_after_last_change_seconds", None), + ) + + +class ModelTriggerConfigurationCondition(Enum): + + MODEL_ALIAS_SET = "MODEL_ALIAS_SET" + MODEL_CREATED = "MODEL_CREATED" + MODEL_VERSION_READY = "MODEL_VERSION_READY" + + @dataclass class NotebookOutput: result: Optional[str] = None @@ -7963,6 +8035,8 @@ class TriggerSettings: file_arrival: Optional[FileArrivalTriggerConfiguration] = None """File arrival trigger settings.""" + model: Optional[ModelTriggerConfiguration] = None + pause_status: Optional[PauseStatus] = None """Whether this trigger is paused or not.""" @@ -7976,6 +8050,8 @@ def as_dict(self) -> dict: body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival.as_dict() + if self.model: + body["model"] = self.model.as_dict() if self.pause_status is not None: body["pause_status"] = self.pause_status.value if self.periodic: @@ -7989,6 +8065,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.file_arrival: body["file_arrival"] = self.file_arrival + if self.model: + body["model"] = self.model if self.pause_status is not None: body["pause_status"] = self.pause_status if self.periodic: @@ -8002,6 +8080,7 @@ def from_dict(cls, d: Dict[str, Any]) -> TriggerSettings: """Deserializes the TriggerSettings from a dictionary.""" return cls( file_arrival=_from_dict(d, "file_arrival", FileArrivalTriggerConfiguration), + model=_from_dict(d, "model", ModelTriggerConfiguration), pause_status=_enum(d, "pause_status", PauseStatus), periodic=_from_dict(d, "periodic", PeriodicTriggerConfiguration), table_update=_from_dict(d, "table_update", TableUpdateTriggerConfiguration), @@ -8246,7 +8325,7 @@ class JobsAPI: scalable resources. 
Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in @@ -8397,9 +8476,10 @@ def create( as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. diff --git a/databricks/sdk/service/marketplace.py b/databricks/sdk/service/marketplace.py index 5e5ccc267..84d761c36 100755 --- a/databricks/sdk/service/marketplace.py +++ b/databricks/sdk/service/marketplace.py @@ -3,7 +3,6 @@ from __future__ import annotations import logging -import uuid from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Iterator, List, Optional @@ -2165,6 +2164,8 @@ class PersonalizationRequest: recipient_type: Optional[DeltaSharingRecipientType] = None share: Optional[ShareInfo] = None + """Share information is required for data listings but should be empty/ignored for non-data + listings (MCP and App).""" status: Optional[PersonalizationRequestStatus] = None @@ -4096,8 +4097,6 @@ def update( :returns: :class:`UpdatePersonalizationRequestResponse` """ - if request_id is None or request_id == "": - request_id = str(uuid.uuid4()) body = {} if reason is not None: body["reason"] = reason diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 94fd823ca..4af356e2f 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -10,6 +10,7 @@ from enum import Enum from typing import Any, Callable, Dict, Iterator, List, Optional +from databricks.sdk.common.types.fieldmask import FieldMask from databricks.sdk.service._internal import (Wait, _enum, _from_dict, _repeated_dict, _repeated_enum) @@ -201,6 +202,32 @@ def from_dict(cls, d: Dict[str, Any]) -> ApproveTransitionRequestResponse: return cls(activity=_from_dict(d, "activity", Activity)) +@dataclass +class AuthConfig: + uc_service_credential_name: Optional[str] = None + """Name of the Unity Catalog service credential. 
This value will be set under the option + databricks.serviceCredential""" + + def as_dict(self) -> dict: + """Serializes the AuthConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.uc_service_credential_name is not None: + body["uc_service_credential_name"] = self.uc_service_credential_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AuthConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.uc_service_credential_name is not None: + body["uc_service_credential_name"] = self.uc_service_credential_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AuthConfig: + """Deserializes the AuthConfig from a dictionary.""" + return cls(uc_service_credential_name=d.get("uc_service_credential_name", None)) + + @dataclass class BatchCreateMaterializedFeaturesResponse: materialized_features: Optional[List[MaterializedFeature]] = None @@ -226,6 +253,33 @@ def from_dict(cls, d: Dict[str, Any]) -> BatchCreateMaterializedFeaturesResponse return cls(materialized_features=_repeated_dict(d, "materialized_features", MaterializedFeature)) +@dataclass +class ColumnIdentifier: + variant_expr_path: str + """String representation of the column name or variant expression path. For nested fields, the leaf + value is what will be present in materialized tables and expected to match at query time. For + example, the leaf node of value:trip_details.location_details.pickup_zip is pickup_zip.""" + + def as_dict(self) -> dict: + """Serializes the ColumnIdentifier into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.variant_expr_path is not None: + body["variant_expr_path"] = self.variant_expr_path + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ColumnIdentifier into a shallow dictionary of its immediate attributes.""" + body = {} + if self.variant_expr_path is not None: + body["variant_expr_path"] = self.variant_expr_path + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ColumnIdentifier: + """Deserializes the ColumnIdentifier from a dictionary.""" + return cls(variant_expr_path=d.get("variant_expr_path", None)) + + class CommentActivityAction(Enum): """An action that a user (with sufficient permissions) could take on an activity or comment. 
@@ -600,11 +654,15 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateWebhookResponse: class DataSource: delta_table_source: Optional[DeltaTableSource] = None + kafka_source: Optional[KafkaSource] = None + def as_dict(self) -> dict: """Serializes the DataSource into a dictionary suitable for use as a JSON request body.""" body = {} if self.delta_table_source: body["delta_table_source"] = self.delta_table_source.as_dict() + if self.kafka_source: + body["kafka_source"] = self.kafka_source.as_dict() return body def as_shallow_dict(self) -> dict: @@ -612,12 +670,17 @@ def as_shallow_dict(self) -> dict: body = {} if self.delta_table_source: body["delta_table_source"] = self.delta_table_source + if self.kafka_source: + body["kafka_source"] = self.kafka_source return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> DataSource: """Deserializes the DataSource from a dictionary.""" - return cls(delta_table_source=_from_dict(d, "delta_table_source", DeltaTableSource)) + return cls( + delta_table_source=_from_dict(d, "delta_table_source", DeltaTableSource), + kafka_source=_from_dict(d, "kafka_source", KafkaSource), + ) @dataclass @@ -1385,7 +1448,11 @@ class Feature: """The filter condition applied to the source data before aggregation.""" lineage_context: Optional[LineageContext] = None - """Lineage context information for this feature.""" + """WARNING: This field is primarily intended for internal use by Databricks systems and is + automatically populated when features are created through Databricks notebooks or jobs. Users + should not manually set this field as incorrect values may lead to inaccurate lineage tracking + or unexpected behavior. This field will be set by feature-engineering client and should be left + unset by SDK and terraform users.""" def as_dict(self) -> dict: """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" @@ -2359,6 +2426,130 @@ def from_dict(cls, d: Dict[str, Any]) -> JobSpecWithoutSecret: return cls(job_id=d.get("job_id", None), workspace_url=d.get("workspace_url", None)) +@dataclass +class KafkaConfig: + name: str + """Name that uniquely identifies this Kafka config within the metastore. This will be the + identifier used from the Feature object to reference these configs for a feature. Can be + distinct from topic name.""" + + bootstrap_servers: str + """A comma-separated list of host/port pairs pointing to Kafka cluster.""" + + subscription_mode: SubscriptionMode + """Options to configure which Kafka topics to pull data from.""" + + auth_config: AuthConfig + """Authentication configuration for connection to topics.""" + + extra_options: Optional[Dict[str, str]] = None + """Catch-all for miscellaneous options. Keys should be source options or Kafka consumer options + (kafka.*)""" + + key_schema: Optional[SchemaConfig] = None + """Schema configuration for extracting message keys from topics. At least one of key_schema and + value_schema must be provided.""" + + value_schema: Optional[SchemaConfig] = None + """Schema configuration for extracting message values from topics. 
At least one of key_schema and + value_schema must be provided.""" + + def as_dict(self) -> dict: + """Serializes the KafkaConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.auth_config: + body["auth_config"] = self.auth_config.as_dict() + if self.bootstrap_servers is not None: + body["bootstrap_servers"] = self.bootstrap_servers + if self.extra_options: + body["extra_options"] = self.extra_options + if self.key_schema: + body["key_schema"] = self.key_schema.as_dict() + if self.name is not None: + body["name"] = self.name + if self.subscription_mode: + body["subscription_mode"] = self.subscription_mode.as_dict() + if self.value_schema: + body["value_schema"] = self.value_schema.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the KafkaConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.auth_config: + body["auth_config"] = self.auth_config + if self.bootstrap_servers is not None: + body["bootstrap_servers"] = self.bootstrap_servers + if self.extra_options: + body["extra_options"] = self.extra_options + if self.key_schema: + body["key_schema"] = self.key_schema + if self.name is not None: + body["name"] = self.name + if self.subscription_mode: + body["subscription_mode"] = self.subscription_mode + if self.value_schema: + body["value_schema"] = self.value_schema + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> KafkaConfig: + """Deserializes the KafkaConfig from a dictionary.""" + return cls( + auth_config=_from_dict(d, "auth_config", AuthConfig), + bootstrap_servers=d.get("bootstrap_servers", None), + extra_options=d.get("extra_options", None), + key_schema=_from_dict(d, "key_schema", SchemaConfig), + name=d.get("name", None), + subscription_mode=_from_dict(d, "subscription_mode", SubscriptionMode), + value_schema=_from_dict(d, "value_schema", SchemaConfig), + ) + + +@dataclass +class KafkaSource: + name: str + """Name of the Kafka source, used to identify it. This is used to look up the corresponding + KafkaConfig object. 
Can be distinct from topic name.""" + + entity_column_identifiers: List[ColumnIdentifier] + """The entity column identifiers of the Kafka source.""" + + timeseries_column_identifier: ColumnIdentifier + """The timeseries column identifier of the Kafka source.""" + + def as_dict(self) -> dict: + """Serializes the KafkaSource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.entity_column_identifiers: + body["entity_column_identifiers"] = [v.as_dict() for v in self.entity_column_identifiers] + if self.name is not None: + body["name"] = self.name + if self.timeseries_column_identifier: + body["timeseries_column_identifier"] = self.timeseries_column_identifier.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the KafkaSource into a shallow dictionary of its immediate attributes.""" + body = {} + if self.entity_column_identifiers: + body["entity_column_identifiers"] = self.entity_column_identifiers + if self.name is not None: + body["name"] = self.name + if self.timeseries_column_identifier: + body["timeseries_column_identifier"] = self.timeseries_column_identifier + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> KafkaSource: + """Deserializes the KafkaSource from a dictionary.""" + return cls( + entity_column_identifiers=_repeated_dict(d, "entity_column_identifiers", ColumnIdentifier), + name=d.get("name", None), + timeseries_column_identifier=_from_dict(d, "timeseries_column_identifier", ColumnIdentifier), + ) + + @dataclass class LineageContext: """Lineage context information for tracking where an API was invoked. This will allow us to track @@ -2585,6 +2776,41 @@ def from_dict(cls, d: Dict[str, Any]) -> ListFeaturesResponse: return cls(features=_repeated_dict(d, "features", Feature), next_page_token=d.get("next_page_token", None)) +@dataclass +class ListKafkaConfigsResponse: + kafka_configs: List[KafkaConfig] + """List of Kafka configs. 
Schemas are not included in the response.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of results for this query.""" + + def as_dict(self) -> dict: + """Serializes the ListKafkaConfigsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.kafka_configs: + body["kafka_configs"] = [v.as_dict() for v in self.kafka_configs] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListKafkaConfigsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.kafka_configs: + body["kafka_configs"] = self.kafka_configs + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListKafkaConfigsResponse: + """Deserializes the ListKafkaConfigsResponse from a dictionary.""" + return cls( + kafka_configs=_repeated_dict(d, "kafka_configs", KafkaConfig), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ListMaterializedFeaturesResponse: materialized_features: Optional[List[MaterializedFeature]] = None @@ -3154,7 +3380,7 @@ class MaterializedFeature: offline_store_config: Optional[OfflineStoreConfig] = None - online_store_config: Optional[OnlineStore] = None + online_store_config: Optional[OnlineStoreConfig] = None pipeline_schedule_state: Optional[MaterializedFeaturePipelineScheduleState] = None """The schedule state of the materialization pipeline.""" @@ -3209,7 +3435,7 @@ def from_dict(cls, d: Dict[str, Any]) -> MaterializedFeature: last_materialization_time=d.get("last_materialization_time", None), materialized_feature_id=d.get("materialized_feature_id", None), offline_store_config=_from_dict(d, "offline_store_config", OfflineStoreConfig), - online_store_config=_from_dict(d, "online_store_config", OnlineStore), + online_store_config=_from_dict(d, "online_store_config", OnlineStoreConfig), pipeline_schedule_state=_enum(d, "pipeline_schedule_state", MaterializedFeaturePipelineScheduleState), table_name=d.get("table_name", None), ) @@ -4013,6 +4239,60 @@ def from_dict(cls, d: Dict[str, Any]) -> OnlineStore: ) +@dataclass +class OnlineStoreConfig: + """Configuration for online store destination.""" + + catalog_name: str + """The Unity Catalog catalog name. This name is also used as the Lakebase logical database name.""" + + schema_name: str + """The Unity Catalog schema name.""" + + table_name_prefix: str + """Prefix for Unity Catalog table name. 
The materialized feature will be stored in a Lakebase table + with this prefix and a generated postfix.""" + + online_store_name: str + """The name of the target online store.""" + + def as_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + def as_shallow_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> OnlineStoreConfig: + """Deserializes the OnlineStoreConfig from a dictionary.""" + return cls( + catalog_name=d.get("catalog_name", None), + online_store_name=d.get("online_store_name", None), + schema_name=d.get("schema_name", None), + table_name_prefix=d.get("table_name_prefix", None), + ) + + class OnlineStoreState(Enum): AVAILABLE = "AVAILABLE" @@ -4926,6 +5206,31 @@ def from_dict(cls, d: Dict[str, Any]) -> RunTag: return cls(key=d.get("key", None), value=d.get("value", None)) +@dataclass +class SchemaConfig: + json_schema: Optional[str] = None + """Schema of the JSON object in standard IETF JSON schema format (https://json-schema.org/)""" + + def as_dict(self) -> dict: + """Serializes the SchemaConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.json_schema is not None: + body["json_schema"] = self.json_schema + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SchemaConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.json_schema is not None: + body["json_schema"] = self.json_schema + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SchemaConfig: + """Deserializes the SchemaConfig from a dictionary.""" + return cls(json_schema=d.get("json_schema", None)) + + @dataclass class SearchExperimentsResponse: experiments: Optional[List[Experiment]] = None @@ -5317,6 +5622,51 @@ class Status(Enum): READY = "READY" +@dataclass +class SubscriptionMode: + assign: Optional[str] = None + """A JSON string that contains the specific topic-partitions to consume from. For example, for + '{"topicA":[0,1],"topicB":[2,4]}', topicA's 0'th and 1st partitions will be consumed from.""" + + subscribe: Optional[str] = None + """A comma-separated list of Kafka topics to read from. For example, 'topicA,topicB,topicC'.""" + + subscribe_pattern: Optional[str] = None + """A regular expression matching topics to subscribe to. 
For example, 'topic.*' will subscribe to + all topics starting with 'topic'.""" + + def as_dict(self) -> dict: + """Serializes the SubscriptionMode into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.assign is not None: + body["assign"] = self.assign + if self.subscribe is not None: + body["subscribe"] = self.subscribe + if self.subscribe_pattern is not None: + body["subscribe_pattern"] = self.subscribe_pattern + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SubscriptionMode into a shallow dictionary of its immediate attributes.""" + body = {} + if self.assign is not None: + body["assign"] = self.assign + if self.subscribe is not None: + body["subscribe"] = self.subscribe + if self.subscribe_pattern is not None: + body["subscribe_pattern"] = self.subscribe_pattern + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SubscriptionMode: + """Deserializes the SubscriptionMode from a dictionary.""" + return cls( + assign=d.get("assign", None), + subscribe=d.get("subscribe", None), + subscribe_pattern=d.get("subscribe_pattern", None), + ) + + @dataclass class TestRegistryWebhookResponse: body: Optional[str] = None @@ -7012,6 +7362,23 @@ def create_feature(self, feature: Feature) -> Feature: res = self._api.do("POST", "/api/2.0/feature-engineering/features", body=body, headers=headers) return Feature.from_dict(res) + def create_kafka_config(self, kafka_config: KafkaConfig) -> KafkaConfig: + """Create a Kafka config. + + :param kafka_config: :class:`KafkaConfig` + + :returns: :class:`KafkaConfig` + """ + + body = kafka_config.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/feature-engineering/features/kafka-configs", body=body, headers=headers) + return KafkaConfig.from_dict(res) + def create_materialized_feature(self, materialized_feature: MaterializedFeature) -> MaterializedFeature: """Create a materialized feature. @@ -7045,6 +7412,21 @@ def delete_feature(self, full_name: str): self._api.do("DELETE", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) + def delete_kafka_config(self, name: str): + """Delete a Kafka config. + + :param name: str + Name of the Kafka config to delete. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/feature-engineering/features/kafka-configs/kafka/{name}", headers=headers) + def delete_materialized_feature(self, materialized_feature_id: str): """Delete a materialized feature. @@ -7078,6 +7460,22 @@ def get_feature(self, full_name: str) -> Feature: res = self._api.do("GET", f"/api/2.0/feature-engineering/features/{full_name}", headers=headers) return Feature.from_dict(res) + def get_kafka_config(self, name: str) -> KafkaConfig: + """Get a Kafka config. + + :param name: str + Name of the Kafka config to get. + + :returns: :class:`KafkaConfig` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/feature-engineering/features/kafka-configs/{name}", headers=headers) + return KafkaConfig.from_dict(res) + def get_materialized_feature(self, materialized_feature_id: str) -> MaterializedFeature: """Get a materialized feature. 
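A minimal sketch of registering a Kafka source with the new feature-engineering Kafka config methods; the brokers, topic, credential name, and JSON schema are placeholders:

```python
# Hedged sketch: create, list, and delete a Kafka config via w.feature_engineering.
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.ml import (AuthConfig, KafkaConfig, SchemaConfig,
                                        SubscriptionMode)

w = WorkspaceClient()

config = KafkaConfig(
    name="clickstream",                               # logical name referenced by features
    bootstrap_servers="broker-1:9092,broker-2:9092",  # placeholder brokers
    subscription_mode=SubscriptionMode(subscribe="clickstream_events"),
    auth_config=AuthConfig(uc_service_credential_name="kafka_credential"),
    value_schema=SchemaConfig(
        json_schema='{"type": "object", "properties": {"user_id": {"type": "string"}}}'
    ),
)

created = w.feature_engineering.create_kafka_config(config)

# Page through all configs, then clean up.
for kc in w.feature_engineering.list_kafka_configs(page_size=50):
    print(kc.name, kc.bootstrap_servers)
w.feature_engineering.delete_kafka_config(created.name)
```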
@@ -7125,6 +7523,39 @@ def list_features(self, *, page_size: Optional[int] = None, page_token: Optional return query["page_token"] = json["next_page_token"] + def list_kafka_configs( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[KafkaConfig]: + """List Kafka configs. + + :param page_size: int (optional) + The maximum number of results to return. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`KafkaConfig` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do( + "GET", "/api/2.0/feature-engineering/features/kafka-configs", query=query, headers=headers + ) + if "kafka_configs" in json: + for v in json["kafka_configs"]: + yield KafkaConfig.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + def list_materialized_features( self, *, feature_name: Optional[str] = None, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[MaterializedFeature]: @@ -7191,6 +7622,39 @@ def update_feature(self, full_name: str, feature: Feature, update_mask: str) -> ) return Feature.from_dict(res) + def update_kafka_config(self, name: str, kafka_config: KafkaConfig, update_mask: FieldMask) -> KafkaConfig: + """Update a Kafka config. + + :param name: str + Name that uniquely identifies this Kafka config within the metastore. This will be the identifier + used from the Feature object to reference these configs for a feature. Can be distinct from topic + name. + :param kafka_config: :class:`KafkaConfig` + The Kafka config to update. + :param update_mask: FieldMask + The list of fields to update. + + :returns: :class:`KafkaConfig` + """ + + body = kafka_config.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask.ToJsonString() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", + f"/api/2.0/feature-engineering/features/kafka-configs/{name}", + query=query, + body=body, + headers=headers, + ) + return KafkaConfig.from_dict(res) + def update_materialized_feature( self, materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str ) -> MaterializedFeature: @@ -7270,6 +7734,21 @@ def delete_online_store(self, name: str): self._api.do("DELETE", f"/api/2.0/feature-store/online-stores/{name}", headers=headers) + def delete_online_table(self, online_table_name: str): + """Delete online table. + + :param online_table_name: str + The full three-part (catalog, schema, table) name of the online table. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/feature-store/online-tables/{online_table_name}", headers=headers) + def get_online_store(self, name: str) -> OnlineStore: """Get an Online Feature Store. diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9ab410419..d38088dda 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -22,6 +22,33 @@ # all definitions in this file are in alphabetical order +@dataclass +class ConnectionParameters: + source_catalog: Optional[str] = None + """Source catalog for initial connection. 
This is necessary for schema exploration in some database + systems like Oracle, and optional but nice-to-have in some other database systems like Postgres. + For Oracle databases, this maps to a service name.""" + + def as_dict(self) -> dict: + """Serializes the ConnectionParameters into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ConnectionParameters into a shallow dictionary of its immediate attributes.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ConnectionParameters: + """Deserializes the ConnectionParameters from a dictionary.""" + return cls(source_catalog=d.get("source_catalog", None)) + + @dataclass class CreatePipelineResponse: effective_settings: Optional[PipelineSpec] = None @@ -554,6 +581,9 @@ class IngestionGatewayPipelineDefinition: """[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.""" + connection_parameters: Optional[ConnectionParameters] = None + """Optional, Internal. Parameters required to establish an initial connection with the source.""" + gateway_storage_name: Optional[str] = None """Optional. The Unity Catalog-compatible name for the gateway storage location. This is the destination to use for the data that is extracted by the gateway. Spark Declarative Pipelines @@ -566,6 +596,8 @@ def as_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters.as_dict() if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -581,6 +613,8 @@ def as_shallow_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -595,6 +629,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionGatewayPipelineDefinition: return cls( connection_id=d.get("connection_id", None), connection_name=d.get("connection_name", None), + connection_parameters=_from_dict(d, "connection_parameters", ConnectionParameters), gateway_storage_catalog=d.get("gateway_storage_catalog", None), gateway_storage_name=d.get("gateway_storage_name", None), gateway_storage_schema=d.get("gateway_storage_schema", None), @@ -607,6 +642,11 @@ class IngestionPipelineDefinition: """Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.""" + ingest_from_uc_foreign_catalog: Optional[bool] = None + """Immutable. If set to true, the pipeline will ingest tables from the UC foreign catalogs directly + without the need to specify a UC connection or ingestion gateway. 
The `source_catalog` fields in + objects of IngestionConfig are interpreted as the UC foreign catalogs to ingest from.""" + ingestion_gateway_id: Optional[str] = None """Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. This is used with connectors to databases like SQL Server.""" @@ -634,6 +674,8 @@ def as_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -653,6 +695,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -672,6 +716,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinition: """Deserializes the IngestionPipelineDefinition from a dictionary.""" return cls( connection_name=d.get("connection_name", None), + ingest_from_uc_foreign_catalog=d.get("ingest_from_uc_foreign_catalog", None), ingestion_gateway_id=d.get("ingestion_gateway_id", None), netsuite_jar_path=d.get("netsuite_jar_path", None), objects=_repeated_dict(d, "objects", IngestionConfig), @@ -828,31 +873,20 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinitionWorkdayRepor class IngestionSourceType(Enum): BIGQUERY = "BIGQUERY" - CONFLUENCE = "CONFLUENCE" DYNAMICS365 = "DYNAMICS365" FOREIGN_CATALOG = "FOREIGN_CATALOG" GA4_RAW_DATA = "GA4_RAW_DATA" - GOOGLE_ADS = "GOOGLE_ADS" - GUIDEWIRE = "GUIDEWIRE" - HUBSPOT = "HUBSPOT" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" - META_MARKETING = "META_MARKETING" MYSQL = "MYSQL" NETSUITE = "NETSUITE" ORACLE = "ORACLE" POSTGRESQL = "POSTGRESQL" - REDSHIFT = "REDSHIFT" SALESFORCE = "SALESFORCE" - SALESFORCE_MARKETING_CLOUD = "SALESFORCE_MARKETING_CLOUD" SERVICENOW = "SERVICENOW" SHAREPOINT = "SHAREPOINT" - SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" TERADATA = "TERADATA" - TIKTOK_ADS = "TIKTOK_ADS" - WORKDAY_HCM = "WORKDAY_HCM" WORKDAY_RAAS = "WORKDAY_RAAS" - ZENDESK = "ZENDESK" @dataclass @@ -2526,6 +2560,97 @@ def from_dict(cls, d: Dict[str, Any]) -> RestartWindow: ) +@dataclass +class RewindDatasetSpec: + """Configuration for rewinding a specific dataset.""" + + cascade: Optional[bool] = None + """Whether to cascade the rewind to dependent datasets. 
Must be specified.""" + + identifier: Optional[str] = None + """The identifier of the dataset (e.g., "main.foo.tbl1").""" + + reset_checkpoints: Optional[bool] = None + """Whether to reset checkpoints for this dataset.""" + + def as_dict(self) -> dict: + """Serializes the RewindDatasetSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.cascade is not None: + body["cascade"] = self.cascade + if self.identifier is not None: + body["identifier"] = self.identifier + if self.reset_checkpoints is not None: + body["reset_checkpoints"] = self.reset_checkpoints + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RewindDatasetSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.cascade is not None: + body["cascade"] = self.cascade + if self.identifier is not None: + body["identifier"] = self.identifier + if self.reset_checkpoints is not None: + body["reset_checkpoints"] = self.reset_checkpoints + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RewindDatasetSpec: + """Deserializes the RewindDatasetSpec from a dictionary.""" + return cls( + cascade=d.get("cascade", None), + identifier=d.get("identifier", None), + reset_checkpoints=d.get("reset_checkpoints", None), + ) + + +@dataclass +class RewindSpec: + """Information about a rewind being requested for this pipeline or some of the datasets in it.""" + + datasets: Optional[List[RewindDatasetSpec]] = None + """List of datasets to rewind with specific configuration for each. When not specified, all + datasets will be rewound with cascade = true and reset_checkpoints = true.""" + + dry_run: Optional[bool] = None + """If true, this is a dry run and we should emit the RewindSummary but not perform the rewind.""" + + rewind_timestamp: Optional[str] = None + """The base timestamp to rewind to. Must be specified.""" + + def as_dict(self) -> dict: + """Serializes the RewindSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.datasets: + body["datasets"] = [v.as_dict() for v in self.datasets] + if self.dry_run is not None: + body["dry_run"] = self.dry_run + if self.rewind_timestamp is not None: + body["rewind_timestamp"] = self.rewind_timestamp + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RewindSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.datasets: + body["datasets"] = self.datasets + if self.dry_run is not None: + body["dry_run"] = self.dry_run + if self.rewind_timestamp is not None: + body["rewind_timestamp"] = self.rewind_timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RewindSpec: + """Deserializes the RewindSpec from a dictionary.""" + return cls( + datasets=_repeated_dict(d, "datasets", RewindDatasetSpec), + dry_run=d.get("dry_run", None), + rewind_timestamp=d.get("rewind_timestamp", None), + ) + + @dataclass class RunAs: """Write-only setting, available only in Create/Update calls. Specifies the user or service @@ -3483,8 +3608,8 @@ def create( return CreatePipelineResponse.from_dict(res) def delete(self, pipeline_id: str): - """Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and - its tables. You cannot undo this action. + """Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline deletion will cascade to all + pipeline tables. Please reach out to Databricks support for assistance to undo this action. 
:param pipeline_id: str @@ -3741,6 +3866,7 @@ def start_update( full_refresh: Optional[bool] = None, full_refresh_selection: Optional[List[str]] = None, refresh_selection: Optional[List[str]] = None, + rewind_spec: Optional[RewindSpec] = None, validate_only: Optional[bool] = None, ) -> StartUpdateResponse: """Starts a new update for the pipeline. If there is already an active update for the pipeline, the @@ -3758,6 +3884,8 @@ def start_update( A list of tables to update without fullRefresh. If both refresh_selection and full_refresh_selection are empty, this is a full graph update. Full Refresh on a table means that the states of the table will be reset before the refresh. + :param rewind_spec: :class:`RewindSpec` (optional) + The information about the requested rewind operation. If specified this is a rewind mode update. :param validate_only: bool (optional) If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. @@ -3774,6 +3902,8 @@ def start_update( body["full_refresh_selection"] = [v for v in full_refresh_selection] if refresh_selection is not None: body["refresh_selection"] = [v for v in refresh_selection] + if rewind_spec is not None: + body["rewind_spec"] = rewind_spec.as_dict() if validate_only is not None: body["validate_only"] = validate_only headers = { diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index c6126a23e..99996e980 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -1180,6 +1180,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerFacingNetworkConnectivityConfig class CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -3825,6 +3827,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzurePrivateEndpointRule: class NccAzurePrivateEndpointRuleConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -4165,8 +4169,6 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): - CREATE_FAILED = "CREATE_FAILED" - CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" diff --git a/databricks/sdk/service/settingsv2.py b/databricks/sdk/service/settingsv2.py index a529d7a5a..fabd932ba 100755 --- a/databricks/sdk/service/settingsv2.py +++ b/databricks/sdk/service/settingsv2.py @@ -486,39 +486,72 @@ class RestrictWorkspaceAdminsMessageStatus(Enum): @dataclass class Setting: aibi_dashboard_embedding_access_policy: Optional[AibiDashboardEmbeddingAccessPolicy] = None + """Setting value for aibi_dashboard_embedding_access_policy setting. This is the setting value set + by consumers, check effective_aibi_dashboard_embedding_access_policy for final setting value.""" aibi_dashboard_embedding_approved_domains: Optional[AibiDashboardEmbeddingApprovedDomains] = None + """Setting value for aibi_dashboard_embedding_approved_domains setting. This is the setting value + set by consumers, check effective_aibi_dashboard_embedding_approved_domains for final setting + value.""" automatic_cluster_update_workspace: Optional[ClusterAutoRestartMessage] = None + """Setting value for automatic_cluster_update_workspace setting. 
This is the setting value set by + consumers, check effective_automatic_cluster_update_workspace for final setting value.""" boolean_val: Optional[BooleanMessage] = None + """Setting value for boolean type setting. This is the setting value set by consumers, check + effective_boolean_val for final setting value.""" effective_aibi_dashboard_embedding_access_policy: Optional[AibiDashboardEmbeddingAccessPolicy] = None + """Effective setting value for aibi_dashboard_embedding_access_policy setting. This is the final + effective value of setting. To set a value use aibi_dashboard_embedding_access_policy.""" effective_aibi_dashboard_embedding_approved_domains: Optional[AibiDashboardEmbeddingApprovedDomains] = None + """Effective setting value for aibi_dashboard_embedding_approved_domains setting. This is the final + effective value of setting. To set a value use aibi_dashboard_embedding_approved_domains.""" effective_automatic_cluster_update_workspace: Optional[ClusterAutoRestartMessage] = None + """Effective setting value for automatic_cluster_update_workspace setting. This is the final + effective value of setting. To set a value use automatic_cluster_update_workspace.""" effective_boolean_val: Optional[BooleanMessage] = None + """Effective setting value for boolean type setting. This is the final effective value of setting. + To set a value use boolean_val.""" effective_integer_val: Optional[IntegerMessage] = None + """Effective setting value for integer type setting. This is the final effective value of setting. + To set a value use integer_val.""" effective_personal_compute: Optional[PersonalComputeMessage] = None + """Effective setting value for personal_compute setting. This is the final effective value of + setting. To set a value use personal_compute.""" effective_restrict_workspace_admins: Optional[RestrictWorkspaceAdminsMessage] = None + """Effective setting value for restrict_workspace_admins setting. This is the final effective value + of setting. To set a value use restrict_workspace_admins.""" effective_string_val: Optional[StringMessage] = None + """Effective setting value for string type setting. This is the final effective value of setting. + To set a value use string_val.""" integer_val: Optional[IntegerMessage] = None + """Setting value for integer type setting. This is the setting value set by consumers, check + effective_integer_val for final setting value.""" name: Optional[str] = None """Name of the setting.""" personal_compute: Optional[PersonalComputeMessage] = None + """Setting value for personal_compute setting. This is the setting value set by consumers, check + effective_personal_compute for final setting value.""" restrict_workspace_admins: Optional[RestrictWorkspaceAdminsMessage] = None + """Setting value for restrict_workspace_admins setting. This is the setting value set by consumers, + check effective_restrict_workspace_admins for final setting value.""" string_val: Optional[StringMessage] = None + """Setting value for string type setting. This is the setting value set by consumers, check + effective_string_val for final setting value.""" def as_dict(self) -> dict: """Serializes the Setting into a dictionary suitable for use as a JSON request body.""" @@ -658,7 +691,8 @@ class SettingsMetadata: """Name of the setting.""" type: Optional[str] = None - """Type of the setting. To set this setting, the value sent must match this type.""" + """Sample message depicting the type of the setting. 
To set this setting, the value sent must match + this type.""" def as_dict(self) -> dict: """Serializes the SettingsMetadata into a dictionary suitable for use as a JSON request body.""" @@ -786,7 +820,9 @@ def list_account_settings_metadata( def patch_public_account_setting(self, name: str, setting: Setting) -> Setting: """Patch a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of - setting available via public APIs at account level. + setting available via public APIs at account level. To determine the correct field to include in a + patch request, refer to the type field of the setting returned in the + :method:settingsv2/listaccountsettingsmetadata response. :param name: str :param setting: :class:`Setting` @@ -817,6 +853,7 @@ def get_public_workspace_setting(self, name: str) -> Setting: of setting available via public APIs. :param name: str + Name of the setting :returns: :class:`Setting` """ @@ -869,9 +906,12 @@ def list_workspace_settings_metadata( def patch_public_workspace_setting(self, name: str, setting: Setting) -> Setting: """Patch a setting value at workspace level. See :method:settingsv2/listworkspacesettingsmetadata for - list of setting available via public APIs at workspace level. + list of setting available via public APIs at workspace level. To determine the correct field to + include in a patch request, refer to the type field of the setting returned in the + :method:settingsv2/listworkspacesettingsmetadata response. :param name: str + Name of the setting :param setting: :class:`Setting` :returns: :class:`Setting` diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 2b839f687..403f401d3 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -950,6 +950,7 @@ class AlertV2OperandColumn: name: str aggregation: Optional[Aggregation] = None + """If not set, the behavior is equivalent to using `First row` in the UI.""" display: Optional[str] = None @@ -6302,7 +6303,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -6381,8 +6381,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -6395,7 +6393,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/tags.py 
b/databricks/sdk/service/tags.py index 91bdc27ef..15102543d 100755 --- a/databricks/sdk/service/tags.py +++ b/databricks/sdk/service/tags.py @@ -134,16 +134,24 @@ def from_dict(cls, d: Dict[str, Any]) -> Value: class TagPoliciesAPI: - """The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag - policies can be managed using the [Account Access Control Proxy API]. + """The Tag Policy API allows you to manage policies for governed tags in Databricks. For Terraform usage, see + the [Tag Policy Terraform documentation]. Permissions for tag policies can be managed using the [Account + Access Control Proxy API]. - [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy""" + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy + """ def __init__(self, api_client): self._api = api_client def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: - """Creates a new tag policy, making the associated tag key governed. + """Creates a new tag policy, making the associated tag key governed. For Terraform usage, see the [Tag + Policy Terraform documentation]. To manage permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_policy: :class:`TagPolicy` @@ -160,7 +168,10 @@ def create_tag_policy(self, tag_policy: TagPolicy) -> TagPolicy: return TagPolicy.from_dict(res) def delete_tag_policy(self, tag_key: str): - """Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. + """Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. For + Terraform usage, see the [Tag Policy Terraform documentation]. + + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str @@ -174,7 +185,12 @@ def delete_tag_policy(self, tag_key: str): self._api.do("DELETE", f"/api/2.1/tag-policies/{tag_key}", headers=headers) def get_tag_policy(self, tag_key: str) -> TagPolicy: - """Gets a single tag policy by its associated governed tag's key. + """Gets a single tag policy by its associated governed tag's key. For Terraform usage, see the [Tag + Policy Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policy :param tag_key: str @@ -191,7 +207,12 @@ def get_tag_policy(self, tag_key: str) -> TagPolicy: def list_tag_policies( self, *, page_size: Optional[int] = None, page_token: Optional[str] = None ) -> Iterator[TagPolicy]: - """Lists the tag policies for all governed tags in the account. + """Lists the tag policies for all governed tags in the account. For Terraform usage, see the [Tag Policy + Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. 
+ + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policies :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than @@ -222,7 +243,12 @@ def list_tag_policies( query["page_token"] = json["next_page_token"] def update_tag_policy(self, tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy: - """Updates an existing tag policy for a single governed tag. + """Updates an existing tag policy for a single governed tag. For Terraform usage, see the [Tag Policy + Terraform documentation]. To manage permissions for tag policies, use the [Account Access Control + Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str :param tag_policy: :class:`TagPolicy` diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index a0b731ffa..567447366 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -26,11 +26,16 @@ class ColumnInfo: name: Optional[str] = None """Name of the column.""" + type_text: Optional[str] = None + """Data type of the column (e.g., "string", "int", "array")""" + def as_dict(self) -> dict: """Serializes the ColumnInfo into a dictionary suitable for use as a JSON request body.""" body = {} if self.name is not None: body["name"] = self.name + if self.type_text is not None: + body["type_text"] = self.type_text return body def as_shallow_dict(self) -> dict: @@ -38,12 +43,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.name is not None: body["name"] = self.name + if self.type_text is not None: + body["type_text"] = self.type_text return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> ColumnInfo: """Deserializes the ColumnInfo from a dictionary.""" - return cls(name=d.get("name", None)) + return cls(name=d.get("name", None), type_text=d.get("type_text", None)) @dataclass @@ -737,6 +744,153 @@ def from_dict(cls, d: Dict[str, Any]) -> MapStringValueEntry: return cls(key=d.get("key", None), value=_from_dict(d, "value", Value)) +@dataclass +class Metric: + """Metric specification""" + + labels: Optional[List[MetricLabel]] = None + """Metric labels""" + + name: Optional[str] = None + """Metric name""" + + percentile: Optional[float] = None + """Percentile for the metric""" + + def as_dict(self) -> dict: + """Serializes the Metric into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.labels: + body["labels"] = [v.as_dict() for v in self.labels] + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = self.percentile + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Metric into a shallow dictionary of its immediate attributes.""" + body = {} + if self.labels: + body["labels"] = self.labels + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = self.percentile + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Metric: + """Deserializes the Metric from a dictionary.""" + return cls( + labels=_repeated_dict(d, "labels", MetricLabel), + 
name=d.get("name", None), + percentile=d.get("percentile", None), + ) + + +@dataclass +class MetricLabel: + """Label for a metric""" + + name: Optional[str] = None + """Label name""" + + value: Optional[str] = None + """Label value""" + + def as_dict(self) -> dict: + """Serializes the MetricLabel into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricLabel into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricLabel: + """Deserializes the MetricLabel from a dictionary.""" + return cls(name=d.get("name", None), value=d.get("value", None)) + + +@dataclass +class MetricValue: + """Single metric value at a specific timestamp""" + + timestamp: Optional[int] = None + """Timestamp of the metric value (milliseconds since epoch)""" + + value: Optional[float] = None + """Metric value""" + + def as_dict(self) -> dict: + """Serializes the MetricValue into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricValue into a shallow dictionary of its immediate attributes.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValue: + """Deserializes the MetricValue from a dictionary.""" + return cls(timestamp=d.get("timestamp", None), value=d.get("value", None)) + + +@dataclass +class MetricValues: + """Collection of metric values for a specific metric""" + + metric: Optional[Metric] = None + """Metric specification""" + + values: Optional[List[MetricValue]] = None + """Time series of metric values""" + + def as_dict(self) -> dict: + """Serializes the MetricValues into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric: + body["metric"] = self.metric.as_dict() + if self.values: + body["values"] = [v.as_dict() for v in self.values] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricValues into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric: + body["metric"] = self.metric + if self.values: + body["values"] = self.values + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValues: + """Deserializes the MetricValues from a dictionary.""" + return cls(metric=_from_dict(d, "metric", Metric), values=_repeated_dict(d, "values", MetricValue)) + + @dataclass class MiniVectorIndex: creator: Optional[str] = None @@ -998,6 +1152,44 @@ def from_dict(cls, d: Dict[str, Any]) -> ResultManifest: return cls(column_count=d.get("column_count", None), columns=_repeated_dict(d, "columns", ColumnInfo)) +@dataclass +class RetrieveUserVisibleMetricsResponse: + """Response containing user-visible metrics""" + + metric_values: Optional[List[MetricValues]] = None + """Collection of metric values""" + + next_page_token: Optional[str] = None + """A token that can be used to get the next page of results. 
If not present, there are no more + results to show.""" + + def as_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric_values: + body["metric_values"] = [v.as_dict() for v in self.metric_values] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric_values: + body["metric_values"] = self.metric_values + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RetrieveUserVisibleMetricsResponse: + """Deserializes the RetrieveUserVisibleMetricsResponse from a dictionary.""" + return cls( + metric_values=_repeated_dict(d, "metric_values", MetricValues), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ScanVectorIndexResponse: """Response to a scan vector index request.""" @@ -1519,6 +1711,53 @@ def list_endpoints(self, *, page_token: Optional[str] = None) -> Iterator[Endpoi return query["page_token"] = json["next_page_token"] + def retrieve_user_visible_metrics( + self, + name: str, + *, + end_time: Optional[str] = None, + granularity_in_seconds: Optional[int] = None, + metrics: Optional[List[Metric]] = None, + page_token: Optional[str] = None, + start_time: Optional[str] = None, + ) -> RetrieveUserVisibleMetricsResponse: + """Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + """ + + body = {} + if end_time is not None: + body["end_time"] = end_time + if granularity_in_seconds is not None: + body["granularity_in_seconds"] = granularity_in_seconds + if metrics is not None: + body["metrics"] = [v.as_dict() for v in metrics] + if page_token is not None: + body["page_token"] = page_token + if start_time is not None: + body["start_time"] = start_time + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/vector-search/endpoints/{name}/metrics", body=body, headers=headers) + return RetrieveUserVisibleMetricsResponse.from_dict(res) + def update_endpoint_budget_policy( self, endpoint_name: str, budget_policy_id: str ) -> PatchEndpointBudgetPolicyResponse: diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index 7ef18fddb..7eda4c2d7 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -2658,7 +2658,9 @@ def get_permission_levels( """Gets the permission levels that a user can have on an object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. 
Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -2681,7 +2683,9 @@ def get_permissions(self, workspace_object_type: str, workspace_object_id: str) parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -2840,7 +2844,9 @@ def set_permissions( object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) @@ -2872,7 +2878,9 @@ def update_permissions( parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) diff --git a/docs/account/billing/usage_dashboards.rst b/docs/account/billing/usage_dashboards.rst index 4eef82411..5c325ba2e 100644 --- a/docs/account/billing/usage_dashboards.rst +++ b/docs/account/billing/usage_dashboards.rst @@ -8,13 +8,15 @@ your usage with pre-built dashboards: visualize breakdowns, analyze tag attributions, and identify cost drivers. - .. py:method:: create( [, dashboard_type: Optional[UsageDashboardType], workspace_id: Optional[int]]) -> CreateBillingUsageDashboardResponse + .. py:method:: create( [, dashboard_type: Optional[UsageDashboardType], major_version: Optional[UsageDashboardMajorVersion], workspace_id: Optional[int]]) -> CreateBillingUsageDashboardResponse Create a usage dashboard specified by workspaceId, accountId, and dashboard type. :param dashboard_type: :class:`UsageDashboardType` (optional) Workspace level usage dashboard shows usage data for the specified workspace ID. Global level usage dashboard shows usage data for all workspaces in the account. + :param major_version: :class:`UsageDashboardMajorVersion` (optional) + The major version of the usage dashboard template to use. Defaults to VERSION_1. :param workspace_id: int (optional) The workspace ID of the workspace in which the usage dashboard is created. diff --git a/docs/account/iam/groups_v2.rst b/docs/account/iam/groups_v2.rst index 9a38fb63d..622277161 100644 --- a/docs/account/iam/groups_v2.rst +++ b/docs/account/iam/groups_v2.rst @@ -52,8 +52,9 @@ .. 
py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountGroup] Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - all = a.workspace_assignment.list(workspace_id=workspace_id) + all = a.workspace_assignment.list(workspace_id=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index b71c1707e..d63648d58 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - creds = a.credentials.create( + role = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=creds.credentials_id) + a.credentials.delete(credentials_id=role.credentials_id) Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 41a04deb3..25ee5abaa 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -23,13 +23,10 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/account/settingsv2/settings_v2.rst b/docs/account/settingsv2/settings_v2.rst index 03224db02..da0206e2e 100644 --- a/docs/account/settingsv2/settings_v2.rst +++ b/docs/account/settingsv2/settings_v2.rst @@ -38,7 +38,9 @@ .. py:method:: patch_public_account_setting(name: str, setting: Setting) -> Setting Patch a setting value at account level. See :method:settingsv2/listaccountsettingsmetadata for list of - setting available via public APIs at account level.
To determine the correct field to include in a + patch request, refer to the type field of the setting returned in the + :method:settingsv2/listaccountsettingsmetadata response. :param name: str :param setting: :class:`Setting` diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 320c875e1..2bc765a2a 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -329,9 +329,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: LARGE :value: "LARGE" - .. py:attribute:: LIQUID - :value: "LIQUID" - .. py:attribute:: MEDIUM :value: "MEDIUM" diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index 60f015a7b..b58ec1528 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -200,6 +200,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: UsageDashboardMajorVersion + + .. py:attribute:: USAGE_DASHBOARD_MAJOR_VERSION_1 + :value: "USAGE_DASHBOARD_MAJOR_VERSION_1" + + .. py:attribute:: USAGE_DASHBOARD_MAJOR_VERSION_2 + :value: "USAGE_DASHBOARD_MAJOR_VERSION_2" + .. py:class:: UsageDashboardType .. py:attribute:: USAGE_DASHBOARD_TYPE_GLOBAL diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 44209d4b9..ca2bb65bb 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -279,7 +279,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - Next Id: 47 + Next Id: 48 .. py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -305,9 +305,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ORACLE :value: "ORACLE" - .. py:attribute:: PALANTIR - :value: "PALANTIR" - .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" @@ -1500,7 +1497,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271 + Latest kind: SECRET_EXTERNAL_AWS_SECRETS_MANAGER = 273; Next id:274 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" @@ -1604,9 +1601,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TABLE_FOREIGN_ORACLE :value: "TABLE_FOREIGN_ORACLE" - .. py:attribute:: TABLE_FOREIGN_PALANTIR - :value: "TABLE_FOREIGN_PALANTIR" - .. py:attribute:: TABLE_FOREIGN_POSTGRESQL :value: "TABLE_FOREIGN_POSTGRESQL" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index c6064252a..7ded58e8f 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1216,9 +1216,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1441,12 +1438,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. 
py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1483,9 +1474,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index df004c847..af9a747b3 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -214,9 +214,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION :value: "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION - :value: "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_JSON_EXCEPTION :value: "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" @@ -385,6 +382,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: TextAttachmentPurpose + + Purpose/intent of a text attachment + + .. py:attribute:: FOLLOW_UP_QUESTION + :value: "FOLLOW_UP_QUESTION" + .. autoclass:: TrashDashboardResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 04a47acf2..955b38561 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -484,6 +484,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ModelTriggerConfiguration + :members: + :undoc-members: + +.. py:class:: ModelTriggerConfigurationCondition + + .. py:attribute:: MODEL_ALIAS_SET + :value: "MODEL_ALIAS_SET" + + .. py:attribute:: MODEL_CREATED + :value: "MODEL_CREATED" + + .. py:attribute:: MODEL_VERSION_READY + :value: "MODEL_VERSION_READY" + .. autoclass:: NotebookOutput :members: :undoc-members: diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 844e66245..accbdaaf2 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -66,10 +66,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AuthConfig + :members: + :undoc-members: + .. autoclass:: BatchCreateMaterializedFeaturesResponse :members: :undoc-members: +.. autoclass:: ColumnIdentifier + :members: + :undoc-members: + .. py:class:: CommentActivityAction An action that a user (with sufficient permissions) could take on an activity or comment. @@ -425,6 +433,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: KafkaConfig + :members: + :undoc-members: + +.. autoclass:: KafkaSource + :members: + :undoc-members: + .. autoclass:: LineageContext :members: :undoc-members: @@ -449,6 +465,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListKafkaConfigsResponse + :members: + :undoc-members: + .. autoclass:: ListMaterializedFeaturesResponse :members: :undoc-members: @@ -604,6 +624,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: OnlineStoreConfig + :members: + :undoc-members: + .. 
py:class:: OnlineStoreState .. py:attribute:: AVAILABLE @@ -840,6 +864,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SchemaConfig + :members: + :undoc-members: + .. autoclass:: SearchExperimentsResponse :members: :undoc-members: @@ -907,6 +935,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: READY :value: "READY" +.. autoclass:: SubscriptionMode + :members: + :undoc-members: + .. autoclass:: TestRegistryWebhookResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index cc2f14411..cd6a437f8 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -4,6 +4,10 @@ Delta Live Tables These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.pipelines`` module. .. py:currentmodule:: databricks.sdk.service.pipelines +.. autoclass:: ConnectionParameters + :members: + :undoc-members: + .. autoclass:: CreatePipelineResponse :members: :undoc-members: @@ -139,9 +143,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BIGQUERY :value: "BIGQUERY" - .. py:attribute:: CONFLUENCE - :value: "CONFLUENCE" - .. py:attribute:: DYNAMICS365 :value: "DYNAMICS365" @@ -151,21 +152,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GA4_RAW_DATA :value: "GA4_RAW_DATA" - .. py:attribute:: GOOGLE_ADS - :value: "GOOGLE_ADS" - - .. py:attribute:: GUIDEWIRE - :value: "GUIDEWIRE" - - .. py:attribute:: HUBSPOT - :value: "HUBSPOT" - .. py:attribute:: MANAGED_POSTGRESQL :value: "MANAGED_POSTGRESQL" - .. py:attribute:: META_MARKETING - :value: "META_MARKETING" - .. py:attribute:: MYSQL :value: "MYSQL" @@ -178,42 +167,24 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" - .. py:attribute:: REDSHIFT - :value: "REDSHIFT" - .. py:attribute:: SALESFORCE :value: "SALESFORCE" - .. py:attribute:: SALESFORCE_MARKETING_CLOUD - :value: "SALESFORCE_MARKETING_CLOUD" - .. py:attribute:: SERVICENOW :value: "SERVICENOW" .. py:attribute:: SHAREPOINT :value: "SHAREPOINT" - .. py:attribute:: SQLDW - :value: "SQLDW" - .. py:attribute:: SQLSERVER :value: "SQLSERVER" .. py:attribute:: TERADATA :value: "TERADATA" - .. py:attribute:: TIKTOK_ADS - :value: "TIKTOK_ADS" - - .. py:attribute:: WORKDAY_HCM - :value: "WORKDAY_HCM" - .. py:attribute:: WORKDAY_RAAS :value: "WORKDAY_RAAS" - .. py:attribute:: ZENDESK - :value: "ZENDESK" - .. autoclass:: ListPipelineEventsResponse :members: :undoc-members: @@ -398,6 +369,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RewindDatasetSpec + :members: + :undoc-members: + +.. autoclass:: RewindSpec + :members: + :undoc-members: + .. autoclass:: RunAs :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index b47a84770..a1687d876 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -208,6 +208,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. 
py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -601,6 +607,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccAzurePrivateEndpointRuleConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -641,12 +653,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccPrivateEndpointRulePrivateLinkConnectionState - .. py:attribute:: CREATE_FAILED - :value: "CREATE_FAILED" - - .. py:attribute:: CREATING - :value: "CREATING" - .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 865aba6c5..66f6340c9 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1296,9 +1296,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1521,12 +1518,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1563,9 +1554,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index b8bd46536..33e37bdd8 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -109,6 +109,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Metric + :members: + :undoc-members: + +.. autoclass:: MetricLabel + :members: + :undoc-members: + +.. autoclass:: MetricValue + :members: + :undoc-members: + +.. autoclass:: MetricValues + :members: + :undoc-members: + .. autoclass:: MiniVectorIndex :members: :undoc-members: @@ -147,6 +163,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RetrieveUserVisibleMetricsResponse + :members: + :undoc-members: + .. 
autoclass:: ScanVectorIndexResponse :members: :undoc-members: diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 17297d8dd..258f994d3 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -155,12 +155,13 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, comment="updated") + _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) # cleanup w.catalogs.delete(name=created.name, force=True) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 612800956..fdf69e38a 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -140,10 +140,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list() + all = w.external_locations.list(catalog.ListExternalLocationsRequest()) Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on @@ -190,24 +191,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/rfa.rst b/docs/workspace/catalog/rfa.rst index 3019403bb..e5e05073e 100644 --- a/docs/workspace/catalog/rfa.rst +++ b/docs/workspace/catalog/rfa.rst @@ -4,12 +4,10 @@ .. py:class:: RfaAPI - Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations. + These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations. ..
py:method:: batch_create_access_requests( [, requests: Optional[List[CreateAccessRequest]]]) -> BatchCreateAccessRequestsResponse diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index d8111141e..2eacfda5e 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(name=created.name) Creates a new storage credential. diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index b33bef940..8de553fc2 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -156,7 +156,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index 3ceb286ef..45bb837e1 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -29,6 +29,27 @@ .. py:method:: create_message_and_wait(space_id: str, conversation_id: str, content: str, timeout: datetime.timedelta = 0:20:00) -> GenieMessage + .. py:method:: create_space(warehouse_id: str, serialized_space: str [, description: Optional[str], parent_path: Optional[str], title: Optional[str]]) -> GenieSpace + + Creates a Genie space from a serialized payload. + + :param warehouse_id: str + Warehouse to associate with the new space + :param serialized_space: str + The contents of the Genie Space in serialized string form. Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. + :param description: str (optional) + Optional description + :param parent_path: str (optional) + Parent folder path where the space will be registered + :param title: str (optional) + Optional title override + + :returns: :class:`GenieSpace` + + .. py:method:: delete_conversation(space_id: str, conversation_id: str) Delete a conversation. @@ -150,12 +171,15 @@ :returns: :class:`GenieGetMessageQueryResultResponse` - .. py:method:: get_space(space_id: str) -> GenieSpace + .. py:method:: get_space(space_id: str [, include_serialized_space: Optional[bool]]) -> GenieSpace Get details of a Genie Space. :param space_id: str The ID associated with the Genie space + :param include_serialized_space: bool (optional) + Whether to include the serialized space export in the response. Requires at least CAN EDIT + permission on the space. :returns: :class:`GenieSpace` @@ -248,4 +272,25 @@ + ..
py:method:: update_space(space_id: str [, description: Optional[str], serialized_space: Optional[str], title: Optional[str], warehouse_id: Optional[str]]) -> GenieSpace + + Updates a Genie space with a serialized payload. + + :param space_id: str + Genie space ID + :param description: str (optional) + Optional description + :param serialized_space: str (optional) + The contents of the Genie Space in serialized string form (full replacement). Use the [Get Genie + Space](:method:genie/getspace) API to retrieve an example response, which includes the + `serialized_space` field. This field provides the structure of the JSON string that represents the + space's layout and components. + :param title: str (optional) + Optional title override + :param warehouse_id: str (optional) + Optional warehouse override + + :returns: :class:`GenieSpace` + + .. py:method:: wait_get_message_genie_completed(conversation_id: str, message_id: str, space_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[GenieMessage], None]]) -> GenieMessage diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index b2390ce63..2f95213e2 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me = w.current_user.me() + me2 = w.current_user.me() Get details about the current method caller's identity. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 0b82986de..a28fe7aee 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -10,7 +10,7 @@ scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in @@ -188,9 +188,10 @@ as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. @@ -522,37 +523,11 @@ .. 
code-block:: - import os - import time - from databricks.sdk import WorkspaceClient - from databricks.sdk.service import jobs w = WorkspaceClient() - notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - - cluster_id = ( - w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] - ) - - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", - tasks=[ - jobs.Task( - description="test", - existing_cluster_id=cluster_id, - notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, - ) - ], - ) - - run_list = w.jobs.list_runs(job_id=created_job.job_id) - - # cleanup - w.jobs.delete(job_id=created_job.job_id) + job_list = w.jobs.list(expand_tasks=False) List jobs. diff --git a/docs/workspace/ml/feature_engineering.rst b/docs/workspace/ml/feature_engineering.rst index 57c99a11c..b017ec828 100644 --- a/docs/workspace/ml/feature_engineering.rst +++ b/docs/workspace/ml/feature_engineering.rst @@ -26,6 +26,15 @@ :returns: :class:`Feature` + .. py:method:: create_kafka_config(kafka_config: KafkaConfig) -> KafkaConfig + + Create a Kafka config. + + :param kafka_config: :class:`KafkaConfig` + + :returns: :class:`KafkaConfig` + + .. py:method:: create_materialized_feature(materialized_feature: MaterializedFeature) -> MaterializedFeature Create a materialized feature. @@ -46,6 +55,16 @@ + .. py:method:: delete_kafka_config(name: str) + + Delete a Kafka config. + + :param name: str + Name of the Kafka config to delete. + + + + .. py:method:: delete_materialized_feature(materialized_feature_id: str) Delete a materialized feature. @@ -66,6 +85,16 @@ :returns: :class:`Feature` + .. py:method:: get_kafka_config(name: str) -> KafkaConfig + + Get a Kafka config. + + :param name: str + Name of the Kafka config to get. + + :returns: :class:`KafkaConfig` + + .. py:method:: get_materialized_feature(materialized_feature_id: str) -> MaterializedFeature Get a materialized feature. @@ -88,6 +117,18 @@ :returns: Iterator over :class:`Feature` + .. py:method:: list_kafka_configs( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[KafkaConfig] + + List Kafka configs. + + :param page_size: int (optional) + The maximum number of results to return. + :param page_token: str (optional) + Pagination token to go to the next page based on a previous query. + + :returns: Iterator over :class:`KafkaConfig` + + .. py:method:: list_materialized_features( [, feature_name: Optional[str], page_size: Optional[int], page_token: Optional[str]]) -> Iterator[MaterializedFeature] List materialized features. @@ -118,6 +159,22 @@ :returns: :class:`Feature` + .. py:method:: update_kafka_config(name: str, kafka_config: KafkaConfig, update_mask: FieldMask) -> KafkaConfig + + Update a Kafka config. + + :param name: str + Name that uniquely identifies this Kafka config within the metastore. This will be the identifier + used from the Feature object to reference these configs for a feature. Can be distinct from topic + name. + :param kafka_config: :class:`KafkaConfig` + The Kafka config to update. + :param update_mask: FieldMask + The list of fields to update. + + :returns: :class:`KafkaConfig` + + .. py:method:: update_materialized_feature(materialized_feature_id: str, materialized_feature: MaterializedFeature, update_mask: str) -> MaterializedFeature Update a materialized feature (pause/resume). 
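Taken together, the new `feature_engineering` methods above expose a standard CRUD surface for Kafka configs. A minimal usage sketch follows; it assumes `KafkaConfig` is exposed through `databricks.sdk.service.ml` and that `name` is its identifying field, with any broker/topic connection settings omitted because they are not documented here.

   .. code-block::

      from databricks.sdk import WorkspaceClient
      from databricks.sdk.service import ml

      w = WorkspaceClient()

      # Register a Kafka config. `name` is the identifier that Feature objects
      # use to reference it; connection details are omitted in this sketch.
      created = w.feature_engineering.create_kafka_config(
          kafka_config=ml.KafkaConfig(name="orders-stream")
      )

      # The list call returns an iterator that pages through results via
      # page_token; fetch a single config back by name.
      for kc in w.feature_engineering.list_kafka_configs(page_size=50):
          print(kc.name)
      fetched = w.feature_engineering.get_kafka_config(name=created.name)

      # cleanup
      w.feature_engineering.delete_kafka_config(name=created.name)

Updating a config would pass the modified `KafkaConfig` together with a `FieldMask` naming the changed fields, as described for `update_kafka_config` above.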
diff --git a/docs/workspace/ml/feature_store.rst b/docs/workspace/ml/feature_store.rst index c85fd5e59..40ba8dc3b 100644 --- a/docs/workspace/ml/feature_store.rst +++ b/docs/workspace/ml/feature_store.rst @@ -31,6 +31,16 @@ + .. py:method:: delete_online_table(online_table_name: str) + + Delete online table. + + :param online_table_name: str + The full three-part (catalog, schema, table) name of the online table. + + + + .. py:method:: get_online_store(name: str) -> OnlineStore Get an Online Feature Store. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 98d803a63..601ffd87d 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,7 +90,9 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -734,13 +736,14 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - model = w.model_registry.get_model(name=created.registered_model.name) + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") - w.model_registry.update_model( - name=model.registered_model_databricks.name, + w.model_registry.update_model_version( description=f"sdk-{time.time_ns()}", + name=created.model_version.name, + version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index b0bada615..e147d5491 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -129,8 +129,8 @@ .. py:method:: delete(pipeline_id: str) - Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and - its tables. You cannot undo this action. + Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline deletion will cascade to all + pipeline tables. Please reach out to Databricks support for assistance to undo this action. :param pipeline_id: str @@ -344,7 +344,7 @@ :returns: :class:`PipelinePermissions` - .. py:method:: start_update(pipeline_id: str [, cause: Optional[StartUpdateCause], full_refresh: Optional[bool], full_refresh_selection: Optional[List[str]], refresh_selection: Optional[List[str]], validate_only: Optional[bool]]) -> StartUpdateResponse + .. py:method:: start_update(pipeline_id: str [, cause: Optional[StartUpdateCause], full_refresh: Optional[bool], full_refresh_selection: Optional[List[str]], refresh_selection: Optional[List[str]], rewind_spec: Optional[RewindSpec], validate_only: Optional[bool]]) -> StartUpdateResponse Starts a new update for the pipeline. If there is already an active update for the pipeline, the request will fail and the active update will remain running. @@ -361,6 +361,8 @@ A list of tables to update without fullRefresh. If both refresh_selection and full_refresh_selection are empty, this is a full graph update. Full Refresh on a table means that the states of the table will be reset before the refresh. 
+ :param rewind_spec: :class:`RewindSpec` (optional) + The information about the requested rewind operation. If specified this is a rewind mode update. :param validate_only: bool (optional) If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. diff --git a/docs/workspace/settingsv2/workspace_settings_v2.rst b/docs/workspace/settingsv2/workspace_settings_v2.rst index 2d6d379df..10534c2fa 100644 --- a/docs/workspace/settingsv2/workspace_settings_v2.rst +++ b/docs/workspace/settingsv2/workspace_settings_v2.rst @@ -12,6 +12,7 @@ of setting available via public APIs. :param name: str + Name of the setting :returns: :class:`Setting` @@ -39,9 +40,12 @@ .. py:method:: patch_public_workspace_setting(name: str, setting: Setting) -> Setting Patch a setting value at workspace level. See :method:settingsv2/listworkspacesettingsmetadata for - list of setting available via public APIs at workspace level. + list of setting available via public APIs at workspace level. To determine the correct field to + include in a patch request, refer to the type field of the setting returned in the + :method:settingsv2/listworkspacesettingsmetadata response. :param name: str + Name of the setting :param setting: :class:`Setting` :returns: :class:`Setting` diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 0dfb63fbf..f0081b3f2 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SELECT 1", + query_text="SHOW TABLES", ) ) diff --git a/docs/workspace/tags/tag_policies.rst b/docs/workspace/tags/tag_policies.rst index 35f893b2e..0c335d8ac 100644 --- a/docs/workspace/tags/tag_policies.rst +++ b/docs/workspace/tags/tag_policies.rst @@ -4,14 +4,22 @@ .. py:class:: TagPoliciesAPI - The Tag Policy API allows you to manage policies for governed tags in Databricks. Permissions for tag - policies can be managed using the [Account Access Control Proxy API]. + The Tag Policy API allows you to manage policies for governed tags in Databricks. For Terraform usage, see + the [Tag Policy Terraform documentation]. Permissions for tag policies can be managed using the [Account + Access Control Proxy API]. [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy + .. py:method:: create_tag_policy(tag_policy: TagPolicy) -> TagPolicy - Creates a new tag policy, making the associated tag key governed. + Creates a new tag policy, making the associated tag key governed. For Terraform usage, see the [Tag + Policy Terraform documentation]. To manage permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_policy: :class:`TagPolicy` @@ -20,7 +28,10 @@ .. py:method:: delete_tag_policy(tag_key: str) - Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. + Deletes a tag policy by its associated governed tag's key, leaving that tag key ungoverned. 
For + Terraform usage, see the [Tag Policy Terraform documentation]. + + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str @@ -29,7 +40,12 @@ .. py:method:: get_tag_policy(tag_key: str) -> TagPolicy - Gets a single tag policy by its associated governed tag's key. + Gets a single tag policy by its associated governed tag's key. For Terraform usage, see the [Tag + Policy Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policy :param tag_key: str @@ -38,7 +54,12 @@ .. py:method:: list_tag_policies( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagPolicy] - Lists the tag policies for all governed tags in the account. + Lists the tag policies for all governed tags in the account. For Terraform usage, see the [Tag Policy + Terraform documentation]. To list granted permissions for tag policies, use the [Account Access + Control Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/data-sources/tag_policies :param page_size: int (optional) The maximum number of results to return in this request. Fewer results may be returned than @@ -52,7 +73,12 @@ .. py:method:: update_tag_policy(tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy - Updates an existing tag policy for a single governed tag. + Updates an existing tag policy for a single governed tag. For Terraform usage, see the [Tag Policy + Terraform documentation]. To manage permissions for tag policies, use the [Account Access Control + Proxy API]. + + [Account Access Control Proxy API]: https://docs.databricks.com/api/workspace/accountaccesscontrolproxy + [Tag Policy Terraform documentation]: https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/tag_policy :param tag_key: str :param tag_policy: :class:`TagPolicy` diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index 47a8fa59a..53c0bdd7a 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -55,6 +55,26 @@ :returns: Iterator over :class:`EndpointInfo` + .. py:method:: retrieve_user_visible_metrics(name: str [, end_time: Optional[str], granularity_in_seconds: Optional[int], metrics: Optional[List[Metric]], page_token: Optional[str], start_time: Optional[str]]) -> RetrieveUserVisibleMetricsResponse + + Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + + .. 
py:method:: update_endpoint_budget_policy(endpoint_name: str, budget_policy_id: str) -> PatchEndpointBudgetPolicyResponse Update the budget policy of an endpoint diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index e1b7d12b9..aaccfb1c4 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -79,7 +79,7 @@ notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) + export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. @@ -111,7 +111,9 @@ Gets the permission levels that a user can have on an object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -124,7 +126,9 @@ parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. @@ -263,7 +267,9 @@ object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) @@ -277,7 +283,9 @@ parent objects or root object. :param workspace_object_type: str - The workspace object type for which to get or manage permissions. + The workspace object type for which to get or manage permissions. Could be one of the following: + alerts, alertsv2, dashboards, dbsql-dashboards, directories, experiments, files, genie, notebooks, + queries :param workspace_object_id: str The workspace object for which to get or manage permissions. :param access_control_list: List[:class:`WorkspaceObjectAccessControlRequest`] (optional) diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py index 679118220..c06822e8f 100755 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -4,7 +4,6 @@ import logging from dataclasses import dataclass -from datetime import timedelta from enum import Enum from typing import Any, Dict, List, Optional @@ -20,11 +19,7 @@ @dataclass class DatabricksServiceExceptionWithDetailsProto: - """Serialization format for DatabricksServiceException with error details. This message doesn't - work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. 
Note the definition - of this message should be in sync with DatabricksServiceExceptionProto defined in - /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details - field defined.""" + """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None """@pbjson-skip""" @@ -174,24 +169,15 @@ class Operation: metadata: Optional[dict] = None """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such - metadata. Any method that returns a long-running operation should document the metadata type, if - any.""" + metadata.""" name: Optional[str] = None """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with - `operations/{unique_id}`. - - Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until - that support is added, `name` must be string without internal `/` separators.""" + `operations/{unique_id}`.""" response: Optional[dict] = None - """The normal, successful response of the operation. If the original method returns no data on - success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is - standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the - response should have the type `XxxResponse`, where `Xxx` is the original method name. For - example, if the original method name is `TakeSnapshot()`, the inferred response type is - `TakeSnapshotResponse`.""" + """The normal, successful response of the operation.""" def as_dict(self) -> dict: """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" @@ -380,13 +366,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None) -> TestResource: - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`TestResource` """ @@ -414,7 +400,7 @@ def poll_operation(): return test_resource, None - return poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server @@ -463,13 +449,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None): - """Wait blocks until the long-running operation is completed with default 20 min - timeout. 
If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`Any /* MISSING TYPE */` """ @@ -495,7 +481,7 @@ def poll_operation(): return {}, None - poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server diff --git a/tests/generated/test_json_marshall.py b/tests/generated/test_json_marshall.py index 16fc6fb26..bf5460f2e 100755 --- a/tests/generated/test_json_marshall.py +++ b/tests/generated/test_json_marshall.py @@ -190,7 +190,7 @@ def _fieldmask(d: str) -> FieldMask: required_string="non_default_string", required_struct={}, required_timestamp=_timestamp("2023-12-31T23:59:59Z"), - required_value=json.loads("{}"), + required_value=json.loads('{"key": "value"}'), test_required_enum=TestEnum.TEST_ENUM_TWO, ), """{ @@ -198,6 +198,7 @@ def _fieldmask(d: str) -> FieldMask: "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, + "required_value": {"key": "value"}, "required_message": {}, "test_required_enum": "TEST_ENUM_TWO", "required_duration": "7200s",