diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ad1e20e88..f0c41e65c 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -27cebd58ae24e19c95c675db3a93b6046abaca2a \ No newline at end of file +f9fcbdbee161da31571f306e6f9f11d88606cc9d \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index da08fc0c6..804e48ea0 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,14 @@ ### Internal Changes ### API Changes +* Add `retrieve_user_visible_metrics()` method for [w.vector_search_endpoints](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/vectorsearch/vector_search_endpoints.html) workspace-level service. +* Add `purpose` field for `databricks.sdk.service.dashboards.TextAttachment`. +* Add `connection_parameters` field for `databricks.sdk.service.pipelines.IngestionGatewayPipelineDefinition`. +* Add `ingest_from_uc_foreign_catalog` field for `databricks.sdk.service.pipelines.IngestionPipelineDefinition`. +* Add `type_text` field for `databricks.sdk.service.vectorsearch.ColumnInfo`. +* Add `foreign_catalog` enum value for `databricks.sdk.service.pipelines.IngestionSourceType`. +* Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState`. +* Add `creating` and `create_failed` enum values for `databricks.sdk.service.settings.NccAzurePrivateEndpointRuleConnectionState`. +* Change `destinations` field for `databricks.sdk.service.catalog.AccessRequestDestinations` to no longer be required. +* [Breaking] Change `destinations` field for `databricks.sdk.service.catalog.AccessRequestDestinations` to no longer be required. +* [Breaking] Change `online_store_config` field for `databricks.sdk.service.ml.MaterializedFeature` to type `databricks.sdk.service.ml.OnlineStoreConfig` dataclass. \ No newline at end of file diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 148bfdc43..e692d92f5 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -846,7 +846,7 @@ def resource_quotas(self) -> pkg_catalog.ResourceQuotasAPI: @property def rfa(self) -> pkg_catalog.RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity Catalog securables.""" + """Request for Access enables users to request access for Unity Catalog securables.""" return self._rfa @property diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index f22edec9b..c6d6c2dbc 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -1474,7 +1474,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ApplicationStatus: class ComputeSize(Enum): LARGE = "LARGE" - LIQUID = "LIQUID" MEDIUM = "MEDIUM" diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index a99c5405a..094b3ecd0 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -23,9 +23,6 @@ @dataclass class AccessRequestDestinations: - destinations: List[NotificationDestination] - """The access request destinations for the securable.""" - securable: Securable """The securable for which the access request destinations are being retrieved.""" @@ -33,6 +30,9 @@ class AccessRequestDestinations: """Indicates whether any destinations are hidden from the caller due to a lack of permissions. 
This value is true if the caller does not have permission to see all destinations.""" + destinations: Optional[List[NotificationDestination]] = None + """The access request destinations for the securable.""" + def as_dict(self) -> dict: """Serializes the AccessRequestDestinations into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1740,7 +1740,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: class ConnectionType(Enum): - """Next Id: 47""" + """Next Id: 48""" BIGQUERY = "BIGQUERY" DATABRICKS = "DATABRICKS" @@ -1750,7 +1750,6 @@ class ConnectionType(Enum): HTTP = "HTTP" MYSQL = "MYSQL" ORACLE = "ORACLE" - PALANTIR = "PALANTIR" POSTGRESQL = "POSTGRESQL" POWER_BI = "POWER_BI" REDSHIFT = "REDSHIFT" @@ -8745,7 +8744,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Securable: class SecurableKind(Enum): - """Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271""" + """Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273""" TABLE_DB_STORAGE = "TABLE_DB_STORAGE" TABLE_DELTA = "TABLE_DELTA" @@ -8787,7 +8786,6 @@ class SecurableKind(Enum): TABLE_FOREIGN_MYSQL = "TABLE_FOREIGN_MYSQL" TABLE_FOREIGN_NETSUITE = "TABLE_FOREIGN_NETSUITE" TABLE_FOREIGN_ORACLE = "TABLE_FOREIGN_ORACLE" - TABLE_FOREIGN_PALANTIR = "TABLE_FOREIGN_PALANTIR" TABLE_FOREIGN_POSTGRESQL = "TABLE_FOREIGN_POSTGRESQL" TABLE_FOREIGN_REDSHIFT = "TABLE_FOREIGN_REDSHIFT" TABLE_FOREIGN_SALESFORCE = "TABLE_FOREIGN_SALESFORCE" @@ -14650,12 +14648,10 @@ def list_quotas( class RfaAPI: - """Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + """Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations.""" + These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations.""" def __init__(self, api_client): self._api = api_client diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 64e6f4b0a..473c7cca1 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -171,7 +171,7 @@ class AwsAttributes: be of a form like "us-west-2a". The provided availability zone must be in the same region as the Databricks deployment. For example, "us-west-2a" is not a valid zone id if the Databricks deployment resides in the "us-east-1" region. This is an optional field at cluster creation, and - if not specified, a default zone will be used. If the zone specified is "auto", will try to + if not specified, the zone "auto" will be used. If the zone specified is "auto", will try to place cluster in a zone with high availability, and will retry placement in a different AZ if there is not enough capacity. 
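A minimal sketch of the `zone_id` behavior documented in the `AwsAttributes` hunk above, assuming the usual `databricks.sdk.service.compute` dataclasses; the printed dictionary is roughly the shape that would go into a cluster-create request body:

```python
from databricks.sdk.service.compute import AwsAttributes, AwsAvailability

# Omitting zone_id is now documented as equivalent to "auto": the cluster is placed
# in an AZ with spare capacity, retrying other AZs on capacity errors.
attrs = AwsAttributes(availability=AwsAvailability.SPOT_WITH_FALLBACK, zone_id="auto")
print(attrs.as_dict())  # e.g. {'availability': 'SPOT_WITH_FALLBACK', 'zone_id': 'auto'}
```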
@@ -7112,7 +7112,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -7191,8 +7190,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -7205,7 +7202,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 5bf772f27..6cd3f98bd 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -1161,7 +1161,6 @@ class MessageErrorType(Enum): INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION = "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION" INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION = "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION = "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" INVALID_CHAT_COMPLETION_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" INVALID_COMPLETION_REQUEST_EXCEPTION = "INVALID_COMPLETION_REQUEST_EXCEPTION" INVALID_FUNCTION_CALL_EXCEPTION = "INVALID_FUNCTION_CALL_EXCEPTION" @@ -1639,6 +1638,9 @@ class TextAttachment: id: Optional[str] = None + purpose: Optional[TextAttachmentPurpose] = None + """Purpose/intent of this text attachment""" + def as_dict(self) -> dict: """Serializes the TextAttachment into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1646,6 +1648,8 @@ def as_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.purpose is not None: + body["purpose"] = self.purpose.value return body def as_shallow_dict(self) -> dict: @@ -1655,12 +1659,22 @@ def as_shallow_dict(self) -> dict: body["content"] = self.content if self.id is not None: body["id"] = self.id + if self.purpose is not None: + body["purpose"] = self.purpose return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> TextAttachment: """Deserializes the TextAttachment from a dictionary.""" - return cls(content=d.get("content", None), id=d.get("id", None)) + return cls( + content=d.get("content", None), id=d.get("id", None), purpose=_enum(d, "purpose", TextAttachmentPurpose) + ) + + +class TextAttachmentPurpose(Enum): + """Purpose/intent of a text attachment""" + + FOLLOW_UP_QUESTION = 
"FOLLOW_UP_QUESTION" @dataclass diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index e84121f29..4c0d13ab6 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2525,8 +2525,9 @@ def list( start_index: Optional[int] = None, ) -> Iterator[AccountGroup]: """Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 1ca8e631c..83b35a218 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -2915,10 +2915,10 @@ class JobSettings: environments: Optional[List[JobEnvironment]] = None """A list of task execution environment specifications that can be referenced by serverless tasks - of this job. An environment is required to be present for serverless tasks. For serverless - notebook tasks, the environment is accessible in the notebook environment panel. For other - serverless tasks, the task environment is required to be specified using environment_key in the - task settings.""" + of this job. For serverless notebook tasks, if the environment_key is not specified, the + notebook environment will be used if present. If a jobs environment is specified, it will + override the notebook environment. For other serverless tasks, the task environment is required + to be specified using environment_key in the task settings.""" format: Optional[Format] = None """Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. @@ -8246,7 +8246,7 @@ class JobsAPI: scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in @@ -8397,9 +8397,10 @@ def create( as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. 
For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 94fd823ca..ca0684c87 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -1385,7 +1385,11 @@ class Feature: """The filter condition applied to the source data before aggregation.""" lineage_context: Optional[LineageContext] = None - """Lineage context information for this feature.""" + """WARNING: This field is primarily intended for internal use by Databricks systems and is + automatically populated when features are created through Databricks notebooks or jobs. Users + should not manually set this field as incorrect values may lead to inaccurate lineage tracking + or unexpected behavior. This field will be set by feature-engineering client and should be left + unset by SDK and terraform users.""" def as_dict(self) -> dict: """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" @@ -3154,7 +3158,7 @@ class MaterializedFeature: offline_store_config: Optional[OfflineStoreConfig] = None - online_store_config: Optional[OnlineStore] = None + online_store_config: Optional[OnlineStoreConfig] = None pipeline_schedule_state: Optional[MaterializedFeaturePipelineScheduleState] = None """The schedule state of the materialization pipeline.""" @@ -3209,7 +3213,7 @@ def from_dict(cls, d: Dict[str, Any]) -> MaterializedFeature: last_materialization_time=d.get("last_materialization_time", None), materialized_feature_id=d.get("materialized_feature_id", None), offline_store_config=_from_dict(d, "offline_store_config", OfflineStoreConfig), - online_store_config=_from_dict(d, "online_store_config", OnlineStore), + online_store_config=_from_dict(d, "online_store_config", OnlineStoreConfig), pipeline_schedule_state=_enum(d, "pipeline_schedule_state", MaterializedFeaturePipelineScheduleState), table_name=d.get("table_name", None), ) @@ -4013,6 +4017,60 @@ def from_dict(cls, d: Dict[str, Any]) -> OnlineStore: ) +@dataclass +class OnlineStoreConfig: + """Configuration for online store destination.""" + + catalog_name: str + """The Unity Catalog catalog name. This name is also used as the Lakebase logical database name.""" + + schema_name: str + """The Unity Catalog schema name.""" + + table_name_prefix: str + """Prefix for Unity Catalog table name. 
The materialized feature will be stored in a Lakebase table + with this prefix and a generated postfix.""" + + online_store_name: str + """The name of the target online store.""" + + def as_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + def as_shallow_dict(self) -> dict: + """Serializes the OnlineStoreConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.catalog_name is not None: + body["catalog_name"] = self.catalog_name + if self.online_store_name is not None: + body["online_store_name"] = self.online_store_name + if self.schema_name is not None: + body["schema_name"] = self.schema_name + if self.table_name_prefix is not None: + body["table_name_prefix"] = self.table_name_prefix + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> OnlineStoreConfig: + """Deserializes the OnlineStoreConfig from a dictionary.""" + return cls( + catalog_name=d.get("catalog_name", None), + online_store_name=d.get("online_store_name", None), + schema_name=d.get("schema_name", None), + table_name_prefix=d.get("table_name_prefix", None), + ) + + class OnlineStoreState(Enum): AVAILABLE = "AVAILABLE" diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9ab410419..9c38df323 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -22,6 +22,33 @@ # all definitions in this file are in alphabetical order +@dataclass +class ConnectionParameters: + source_catalog: Optional[str] = None + """Source catalog for initial connection. This is necessary for schema exploration in some database + systems like Oracle, and optional but nice-to-have in some other database systems like Postgres. + For Oracle databases, this maps to a service name.""" + + def as_dict(self) -> dict: + """Serializes the ConnectionParameters into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ConnectionParameters into a shallow dictionary of its immediate attributes.""" + body = {} + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ConnectionParameters: + """Deserializes the ConnectionParameters from a dictionary.""" + return cls(source_catalog=d.get("source_catalog", None)) + + @dataclass class CreatePipelineResponse: effective_settings: Optional[PipelineSpec] = None @@ -554,6 +581,9 @@ class IngestionGatewayPipelineDefinition: """[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.""" + connection_parameters: Optional[ConnectionParameters] = None + """Optional, Internal. Parameters required to establish an initial connection with the source.""" + gateway_storage_name: Optional[str] = None """Optional. The Unity Catalog-compatible name for the gateway storage location. 
This is the destination to use for the data that is extracted by the gateway. Spark Declarative Pipelines @@ -566,6 +596,8 @@ def as_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters.as_dict() if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -581,6 +613,8 @@ def as_shallow_dict(self) -> dict: body["connection_id"] = self.connection_id if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.connection_parameters: + body["connection_parameters"] = self.connection_parameters if self.gateway_storage_catalog is not None: body["gateway_storage_catalog"] = self.gateway_storage_catalog if self.gateway_storage_name is not None: @@ -595,6 +629,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionGatewayPipelineDefinition: return cls( connection_id=d.get("connection_id", None), connection_name=d.get("connection_name", None), + connection_parameters=_from_dict(d, "connection_parameters", ConnectionParameters), gateway_storage_catalog=d.get("gateway_storage_catalog", None), gateway_storage_name=d.get("gateway_storage_name", None), gateway_storage_schema=d.get("gateway_storage_schema", None), @@ -607,6 +642,11 @@ class IngestionPipelineDefinition: """Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.""" + ingest_from_uc_foreign_catalog: Optional[bool] = None + """Immutable. If set to true, the pipeline will ingest tables from the UC foreign catalogs directly + without the need to specify a UC connection or ingestion gateway. The `source_catalog` fields in + objects of IngestionConfig are interpreted as the UC foreign catalogs to ingest from.""" + ingestion_gateway_id: Optional[str] = None """Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. 
This is used with connectors to databases like SQL Server.""" @@ -634,6 +674,8 @@ def as_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -653,6 +695,8 @@ def as_shallow_dict(self) -> dict: body = {} if self.connection_name is not None: body["connection_name"] = self.connection_name + if self.ingest_from_uc_foreign_catalog is not None: + body["ingest_from_uc_foreign_catalog"] = self.ingest_from_uc_foreign_catalog if self.ingestion_gateway_id is not None: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.netsuite_jar_path is not None: @@ -672,6 +716,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinition: """Deserializes the IngestionPipelineDefinition from a dictionary.""" return cls( connection_name=d.get("connection_name", None), + ingest_from_uc_foreign_catalog=d.get("ingest_from_uc_foreign_catalog", None), ingestion_gateway_id=d.get("ingestion_gateway_id", None), netsuite_jar_path=d.get("netsuite_jar_path", None), objects=_repeated_dict(d, "objects", IngestionConfig), @@ -828,31 +873,20 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinitionWorkdayRepor class IngestionSourceType(Enum): BIGQUERY = "BIGQUERY" - CONFLUENCE = "CONFLUENCE" DYNAMICS365 = "DYNAMICS365" FOREIGN_CATALOG = "FOREIGN_CATALOG" GA4_RAW_DATA = "GA4_RAW_DATA" - GOOGLE_ADS = "GOOGLE_ADS" - GUIDEWIRE = "GUIDEWIRE" - HUBSPOT = "HUBSPOT" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" - META_MARKETING = "META_MARKETING" MYSQL = "MYSQL" NETSUITE = "NETSUITE" ORACLE = "ORACLE" POSTGRESQL = "POSTGRESQL" - REDSHIFT = "REDSHIFT" SALESFORCE = "SALESFORCE" - SALESFORCE_MARKETING_CLOUD = "SALESFORCE_MARKETING_CLOUD" SERVICENOW = "SERVICENOW" SHAREPOINT = "SHAREPOINT" - SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" TERADATA = "TERADATA" - TIKTOK_ADS = "TIKTOK_ADS" - WORKDAY_HCM = "WORKDAY_HCM" WORKDAY_RAAS = "WORKDAY_RAAS" - ZENDESK = "ZENDESK" @dataclass diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index c6126a23e..99996e980 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -1180,6 +1180,8 @@ def from_dict(cls, d: Dict[str, Any]) -> CustomerFacingNetworkConnectivityConfig class CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -3825,6 +3827,8 @@ def from_dict(cls, d: Dict[str, Any]) -> NccAzurePrivateEndpointRule: class NccAzurePrivateEndpointRuleConnectionState(Enum): + CREATE_FAILED = "CREATE_FAILED" + CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" @@ -4165,8 +4169,6 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): - CREATE_FAILED = "CREATE_FAILED" - CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 2b839f687..403f401d3 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -950,6 +950,7 @@ class 
AlertV2OperandColumn: name: str aggregation: Optional[Aggregation] = None + """If not set, the behavior is equivalent to using `First row` in the UI.""" display: Optional[str] = None @@ -6302,7 +6303,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -6381,8 +6381,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -6395,7 +6393,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index a0b731ffa..567447366 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -26,11 +26,16 @@ class ColumnInfo: name: Optional[str] = None """Name of the column.""" + type_text: Optional[str] = None + """Data type of the column (e.g., "string", "int", "array")""" + def as_dict(self) -> dict: """Serializes the ColumnInfo into a dictionary suitable for use as a JSON request body.""" body = {} if self.name is not None: body["name"] = self.name + if self.type_text is not None: + body["type_text"] = self.type_text return body def as_shallow_dict(self) -> dict: @@ -38,12 +43,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.name is not None: body["name"] = self.name + if self.type_text is not None: + body["type_text"] = self.type_text return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> ColumnInfo: """Deserializes the ColumnInfo from a dictionary.""" - return cls(name=d.get("name", None)) + return cls(name=d.get("name", None), type_text=d.get("type_text", None)) @dataclass @@ -737,6 +744,153 @@ def from_dict(cls, d: Dict[str, Any]) -> MapStringValueEntry: return cls(key=d.get("key", None), value=_from_dict(d, "value", Value)) +@dataclass +class Metric: + """Metric specification""" + + labels: Optional[List[MetricLabel]] = None + """Metric labels""" + + name: Optional[str] = None + """Metric name""" + + percentile: Optional[float] = None + """Percentile for the metric""" + + def as_dict(self) -> dict: + """Serializes the Metric into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.labels: + body["labels"] = [v.as_dict() for v in self.labels] + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = 
self.percentile + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Metric into a shallow dictionary of its immediate attributes.""" + body = {} + if self.labels: + body["labels"] = self.labels + if self.name is not None: + body["name"] = self.name + if self.percentile is not None: + body["percentile"] = self.percentile + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Metric: + """Deserializes the Metric from a dictionary.""" + return cls( + labels=_repeated_dict(d, "labels", MetricLabel), + name=d.get("name", None), + percentile=d.get("percentile", None), + ) + + +@dataclass +class MetricLabel: + """Label for a metric""" + + name: Optional[str] = None + """Label name""" + + value: Optional[str] = None + """Label value""" + + def as_dict(self) -> dict: + """Serializes the MetricLabel into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricLabel into a shallow dictionary of its immediate attributes.""" + body = {} + if self.name is not None: + body["name"] = self.name + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricLabel: + """Deserializes the MetricLabel from a dictionary.""" + return cls(name=d.get("name", None), value=d.get("value", None)) + + +@dataclass +class MetricValue: + """Single metric value at a specific timestamp""" + + timestamp: Optional[int] = None + """Timestamp of the metric value (milliseconds since epoch)""" + + value: Optional[float] = None + """Metric value""" + + def as_dict(self) -> dict: + """Serializes the MetricValue into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricValue into a shallow dictionary of its immediate attributes.""" + body = {} + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.value is not None: + body["value"] = self.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValue: + """Deserializes the MetricValue from a dictionary.""" + return cls(timestamp=d.get("timestamp", None), value=d.get("value", None)) + + +@dataclass +class MetricValues: + """Collection of metric values for a specific metric""" + + metric: Optional[Metric] = None + """Metric specification""" + + values: Optional[List[MetricValue]] = None + """Time series of metric values""" + + def as_dict(self) -> dict: + """Serializes the MetricValues into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric: + body["metric"] = self.metric.as_dict() + if self.values: + body["values"] = [v.as_dict() for v in self.values] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the MetricValues into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric: + body["metric"] = self.metric + if self.values: + body["values"] = self.values + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> MetricValues: + """Deserializes the MetricValues from a dictionary.""" + return cls(metric=_from_dict(d, "metric", Metric), values=_repeated_dict(d, "values", MetricValue)) + + @dataclass class 
MiniVectorIndex: creator: Optional[str] = None @@ -998,6 +1152,44 @@ def from_dict(cls, d: Dict[str, Any]) -> ResultManifest: return cls(column_count=d.get("column_count", None), columns=_repeated_dict(d, "columns", ColumnInfo)) +@dataclass +class RetrieveUserVisibleMetricsResponse: + """Response containing user-visible metrics""" + + metric_values: Optional[List[MetricValues]] = None + """Collection of metric values""" + + next_page_token: Optional[str] = None + """A token that can be used to get the next page of results. If not present, there are no more + results to show.""" + + def as_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.metric_values: + body["metric_values"] = [v.as_dict() for v in self.metric_values] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RetrieveUserVisibleMetricsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.metric_values: + body["metric_values"] = self.metric_values + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RetrieveUserVisibleMetricsResponse: + """Deserializes the RetrieveUserVisibleMetricsResponse from a dictionary.""" + return cls( + metric_values=_repeated_dict(d, "metric_values", MetricValues), + next_page_token=d.get("next_page_token", None), + ) + + @dataclass class ScanVectorIndexResponse: """Response to a scan vector index request.""" @@ -1519,6 +1711,53 @@ def list_endpoints(self, *, page_token: Optional[str] = None) -> Iterator[Endpoi return query["page_token"] = json["next_page_token"] + def retrieve_user_visible_metrics( + self, + name: str, + *, + end_time: Optional[str] = None, + granularity_in_seconds: Optional[int] = None, + metrics: Optional[List[Metric]] = None, + page_token: Optional[str] = None, + start_time: Optional[str] = None, + ) -> RetrieveUserVisibleMetricsResponse: + """Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + """ + + body = {} + if end_time is not None: + body["end_time"] = end_time + if granularity_in_seconds is not None: + body["granularity_in_seconds"] = granularity_in_seconds + if metrics is not None: + body["metrics"] = [v.as_dict() for v in metrics] + if page_token is not None: + body["page_token"] = page_token + if start_time is not None: + body["start_time"] = start_time + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/vector-search/endpoints/{name}/metrics", body=body, headers=headers) + return RetrieveUserVisibleMetricsResponse.from_dict(res) + def update_endpoint_budget_policy( self, endpoint_name: str, budget_policy_id: str ) -> PatchEndpointBudgetPolicyResponse: diff --git a/docs/account/iam/groups_v2.rst b/docs/account/iam/groups_v2.rst index 9a38fb63d..622277161 100644 --- a/docs/account/iam/groups_v2.rst +++ 
b/docs/account/iam/groups_v2.rst @@ -52,8 +52,9 @@ .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[AccountGroup] Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint - will not return members. Instead, members should be retrieved by iterating through `Get group - details`. + will no longer return members. Instead, members should be retrieved by iterating through `Get group + details`. Existing accounts that rely on this attribute will not be impacted and will continue + receiving member data as before. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - all = a.workspace_assignment.list(workspace_id=workspace_id) + all = a.workspace_assignment.list(list=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 41a04deb3..25ee5abaa 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -23,13 +23,10 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 320c875e1..2bc765a2a 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -329,9 +329,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: LARGE :value: "LARGE" - .. py:attribute:: LIQUID - :value: "LIQUID" - .. py:attribute:: MEDIUM :value: "MEDIUM" diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 44209d4b9..64c296f7c 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -279,7 +279,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - Next Id: 47 + Next Id: 48 .. py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -305,9 +305,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ORACLE :value: "ORACLE" - .. py:attribute:: PALANTIR - :value: "PALANTIR" - .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" @@ -1500,7 +1497,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableKind - Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271 + Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273 .. py:attribute:: TABLE_DB_STORAGE :value: "TABLE_DB_STORAGE" @@ -1604,9 +1601,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. 
py:attribute:: TABLE_FOREIGN_ORACLE :value: "TABLE_FOREIGN_ORACLE" - .. py:attribute:: TABLE_FOREIGN_PALANTIR - :value: "TABLE_FOREIGN_PALANTIR" - .. py:attribute:: TABLE_FOREIGN_POSTGRESQL :value: "TABLE_FOREIGN_POSTGRESQL" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index c6064252a..7ded58e8f 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1216,9 +1216,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1441,12 +1438,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1483,9 +1474,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index df004c847..af9a747b3 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -214,9 +214,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION :value: "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION - :value: "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_JSON_EXCEPTION :value: "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" @@ -385,6 +382,13 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: TextAttachmentPurpose + + Purpose/intent of a text attachment + + .. py:attribute:: FOLLOW_UP_QUESTION + :value: "FOLLOW_UP_QUESTION" + .. autoclass:: TrashDashboardResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 844e66245..3b514c298 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -604,6 +604,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: OnlineStoreConfig + :members: + :undoc-members: + .. py:class:: OnlineStoreState .. py:attribute:: AVAILABLE diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index cc2f14411..e5ac28b74 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -4,6 +4,10 @@ Delta Live Tables These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.pipelines`` module. .. py:currentmodule:: databricks.sdk.service.pipelines +.. autoclass:: ConnectionParameters + :members: + :undoc-members: + .. 
autoclass:: CreatePipelineResponse :members: :undoc-members: @@ -139,9 +143,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BIGQUERY :value: "BIGQUERY" - .. py:attribute:: CONFLUENCE - :value: "CONFLUENCE" - .. py:attribute:: DYNAMICS365 :value: "DYNAMICS365" @@ -151,21 +152,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GA4_RAW_DATA :value: "GA4_RAW_DATA" - .. py:attribute:: GOOGLE_ADS - :value: "GOOGLE_ADS" - - .. py:attribute:: GUIDEWIRE - :value: "GUIDEWIRE" - - .. py:attribute:: HUBSPOT - :value: "HUBSPOT" - .. py:attribute:: MANAGED_POSTGRESQL :value: "MANAGED_POSTGRESQL" - .. py:attribute:: META_MARKETING - :value: "META_MARKETING" - .. py:attribute:: MYSQL :value: "MYSQL" @@ -178,42 +167,24 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" - .. py:attribute:: REDSHIFT - :value: "REDSHIFT" - .. py:attribute:: SALESFORCE :value: "SALESFORCE" - .. py:attribute:: SALESFORCE_MARKETING_CLOUD - :value: "SALESFORCE_MARKETING_CLOUD" - .. py:attribute:: SERVICENOW :value: "SERVICENOW" .. py:attribute:: SHAREPOINT :value: "SHAREPOINT" - .. py:attribute:: SQLDW - :value: "SQLDW" - .. py:attribute:: SQLSERVER :value: "SQLSERVER" .. py:attribute:: TERADATA :value: "TERADATA" - .. py:attribute:: TIKTOK_ADS - :value: "TIKTOK_ADS" - - .. py:attribute:: WORKDAY_HCM - :value: "WORKDAY_HCM" - .. py:attribute:: WORKDAY_RAAS :value: "WORKDAY_RAAS" - .. py:attribute:: ZENDESK - :value: "ZENDESK" - .. autoclass:: ListPipelineEventsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index b47a84770..a1687d876 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -208,6 +208,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -601,6 +607,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccAzurePrivateEndpointRuleConnectionState + .. py:attribute:: CREATE_FAILED + :value: "CREATE_FAILED" + + .. py:attribute:: CREATING + :value: "CREATING" + .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" @@ -641,12 +653,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccPrivateEndpointRulePrivateLinkConnectionState - .. py:attribute:: CREATE_FAILED - :value: "CREATE_FAILED" - - .. py:attribute:: CREATING - :value: "CREATING" - .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 865aba6c5..66f6340c9 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1296,9 +1296,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1521,12 +1518,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. 
py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1563,9 +1554,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index b8bd46536..33e37bdd8 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -109,6 +109,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Metric + :members: + :undoc-members: + +.. autoclass:: MetricLabel + :members: + :undoc-members: + +.. autoclass:: MetricValue + :members: + :undoc-members: + +.. autoclass:: MetricValues + :members: + :undoc-members: + .. autoclass:: MiniVectorIndex :members: :undoc-members: @@ -147,6 +163,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RetrieveUserVisibleMetricsResponse + :members: + :undoc-members: + .. autoclass:: ScanVectorIndexResponse :members: :undoc-members: diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 17297d8dd..d91417852 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) + w.catalogs.delete(name=new_catalog.name, force=True) Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. 
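A hedged sketch of the new pipelines fields documented earlier in this diff (`ConnectionParameters` and `ingest_from_uc_foreign_catalog`); connection and catalog names are invented, and the other definition fields are assumed to be optional keyword arguments:

```python
from databricks.sdk.service.pipelines import (
    ConnectionParameters,
    IngestionGatewayPipelineDefinition,
    IngestionPipelineDefinition,
)

# Gateway definition carrying the new optional connection parameters; for Oracle
# sources, source_catalog maps to a service name.
gateway = IngestionGatewayPipelineDefinition(
    connection_name="my-oracle-connection",            # hypothetical connection
    connection_parameters=ConnectionParameters(source_catalog="ORCLPDB1"),
    gateway_storage_catalog="staging",
    gateway_storage_schema="cdc_gateway",
)
print(gateway.as_dict()["connection_parameters"])      # {'source_catalog': 'ORCLPDB1'}

# Ingestion definition that reads directly from UC foreign catalogs, with no
# connection or ingestion gateway; the source_catalog fields in its objects are
# then interpreted as the foreign catalogs to ingest from.
ingestion = IngestionPipelineDefinition(ingest_from_uc_foreign_catalog=True)
print(ingestion.as_dict())                             # {'ingest_from_uc_foreign_catalog': True}
```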
@@ -155,12 +155,13 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, comment="updated") + _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) # cleanup w.catalogs.delete(name=created.name, force=True) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 612800956..2dd2f9cef 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -32,18 +32,18 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Creates a new external location entry in the metastore. The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage @@ -190,24 +190,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/rfa.rst b/docs/workspace/catalog/rfa.rst index 3019403bb..e5e05073e 100644 --- a/docs/workspace/catalog/rfa.rst +++ b/docs/workspace/catalog/rfa.rst @@ -4,12 +4,10 @@ .. py:class:: RfaAPI - Request for Access enables customers to request access to and manage access request destinations for Unity - Catalog securables. + Request for Access enables users to request access for Unity Catalog securables. - These APIs provide a standardized way to update, get, and request to access request destinations. - Fine-grained authorization ensures that only users with appropriate permissions can manage access request - destinations. 
+ These APIs provide a standardized way for securable owners (or users with MANAGE privileges) to manage + access request destinations. .. py:method:: batch_create_access_requests( [, requests: Optional[List[CreateAccessRequest]]]) -> BatchCreateAccessRequestsResponse diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index 719d5a156..fd1479c78 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -22,13 +22,13 @@ w = WorkspaceClient() - created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") + new_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") - created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) + created = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=new_catalog.name) # cleanup - w.catalogs.delete(name=created_catalog.name, force=True) - w.schemas.delete(full_name=created_schema.full_name) + w.catalogs.delete(name=new_catalog.name, force=True) + w.schemas.delete(full_name=created.full_name) Creates a new schema for catalog in the Metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog. diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index d8111141e..fda14984e 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -32,11 +32,11 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(delete=credential.name) Creates a new storage credential. @@ -123,11 +123,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) + all = w.storage_credentials.list() Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore @@ -173,17 +172,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Updates a storage credential on the metastore. 
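For the breaking `MaterializedFeature.online_store_config` change earlier in this diff, here is a small sketch of the new `OnlineStoreConfig` shape; the names are illustrative, and all four fields are required by the new dataclass:

```python
from databricks.sdk.service.ml import OnlineStoreConfig

cfg = OnlineStoreConfig(
    catalog_name="ml",                   # also used as the Lakebase logical database name
    schema_name="features",
    table_name_prefix="customer_spend",  # a generated postfix is appended to this prefix
    online_store_name="primary-store",
)
print(cfg.as_dict())
# Code that previously passed an OnlineStore for online_store_config needs to
# migrate to OnlineStoreConfig.
```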
diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index b33bef940..8de553fc2 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -156,7 +156,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index ea24afd1a..15524c53e 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -44,7 +44,7 @@ obj = w.workspace.get_status(path=notebook_path) - _ = w.permissions.get(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) + levels = w.permissions.get_permission_levels(request_object_type="notebooks", request_object_id="%d" % (obj.object_id)) Gets the permissions of an object. Objects can inherit permissions from their parent objects or root object. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 0b82986de..c3ff96a4e 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -10,7 +10,7 @@ scalable resources. Your job can consist of a single task or can be a large, multi-task workflow with complex dependencies. Databricks manages the task orchestration, cluster management, monitoring, and error reporting for all of your jobs. You can run your jobs immediately or periodically through an easy-to-use - scheduling system. You can implement job tasks using notebooks, JARS, Delta Live Tables pipelines, or + scheduling system. You can implement job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java applications. You should never hard code secrets or store them in plain text. Use the [Secrets CLI] to manage secrets in @@ -188,9 +188,10 @@ as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) A list of task execution environment specifications that can be referenced by serverless tasks of - this job. An environment is required to be present for serverless tasks. For serverless notebook - tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, - the task environment is required to be specified using environment_key in the task settings. + this job. For serverless notebook tasks, if the environment_key is not specified, the notebook + environment will be used if present. If a jobs environment is specified, it will override the + notebook environment. For other serverless tasks, the task environment is required to be specified + using environment_key in the task settings. :param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. 
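A hedged sketch of the serverless environment behavior described in the `environments` parameter above; the job name, paths, and `compute.Environment` spec are assumptions, not values taken from this change:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import compute, jobs

w = WorkspaceClient()

created = w.jobs.create(
    name="sdk-env-example",  # hypothetical job name
    environments=[
        jobs.JobEnvironment(
            environment_key="default",
            spec=compute.Environment(client="1", dependencies=["pandas==2.2.2"]),
        )
    ],
    tasks=[
        # Serverless notebook task without environment_key: the notebook's own
        # environment is used if present; referencing a job environment would
        # override it.
        jobs.Task(task_key="nb", notebook_task=jobs.NotebookTask(notebook_path="/Users/me/example")),
        # Other serverless tasks must still reference a job environment explicitly.
        jobs.Task(
            task_key="py",
            environment_key="default",
            spark_python_task=jobs.SparkPythonTask(python_file="/Workspace/Users/me/main.py"),
        ),
    ],
)
print(created.job_id)
```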
@@ -357,23 +358,21 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", + run = w.jobs.submit( + run_name=f"sdk-{time.time_ns()}", tasks=[ - jobs.Task( - description="test", + jobs.SubmitTask( existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, + task_key=f"sdk-{time.time_ns()}", ) ], - ) + ).result() - by_id = w.jobs.get(job_id=created_job.job_id) + output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) # cleanup - w.jobs.delete(job_id=created_job.job_id) + w.jobs.delete_run(run_id=run.run_id) Get a single job. diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index 47a8fa59a..53c0bdd7a 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -55,6 +55,26 @@ :returns: Iterator over :class:`EndpointInfo` + .. py:method:: retrieve_user_visible_metrics(name: str [, end_time: Optional[str], granularity_in_seconds: Optional[int], metrics: Optional[List[Metric]], page_token: Optional[str], start_time: Optional[str]]) -> RetrieveUserVisibleMetricsResponse + + Retrieve user-visible metrics for an endpoint + + :param name: str + Vector search endpoint name + :param end_time: str (optional) + End time for metrics query + :param granularity_in_seconds: int (optional) + Granularity in seconds + :param metrics: List[:class:`Metric`] (optional) + List of metrics to retrieve + :param page_token: str (optional) + Token for pagination + :param start_time: str (optional) + Start time for metrics query + + :returns: :class:`RetrieveUserVisibleMetricsResponse` + + .. py:method:: update_endpoint_budget_policy(endpoint_name: str, budget_policy_id: str) -> PatchEndpointBudgetPolicyResponse Update the budget policy of an endpoint diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index e1b7d12b9..945dd7be2 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -79,7 +79,7 @@ notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) + export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. @@ -175,11 +175,16 @@ notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" w.workspace.import_( - content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), - format=workspace.ImportFormat.SOURCE, - language=workspace.Language.SQL, - overwrite=true_, path=notebook_path, + overwrite=True, + format=workspace.ImportFormat.SOURCE, + language=workspace.Language.PYTHON, + content=base64.b64encode( + ( + """print(1) + """ + ).encode() + ).decode(), ) Imports a workspace object (for example, a notebook or file) or the contents of an entire directory. 
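Circling back to the new `retrieve_user_visible_metrics` entry documented in the vector_search_endpoints.rst hunk above: every filter is optional per the signature, so a minimal call needs only the endpoint name. The endpoint name and time-window values below are placeholders, and the timestamp format accepted by `start_time`/`end_time` is not specified in this diff.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Fetch user-visible metrics for one endpoint over an assumed one-day window,
    # bucketed hourly; page_token would come from a previous response when paginating.
    resp = w.vector_search_endpoints.retrieve_user_visible_metrics(
        name="my-endpoint",                 # hypothetical endpoint name
        start_time="2024-01-01T00:00:00Z",  # placeholder; format not defined here
        end_time="2024-01-02T00:00:00Z",
        granularity_in_seconds=3600,
    )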
diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py index 679118220..c06822e8f 100755 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -4,7 +4,6 @@ import logging from dataclasses import dataclass -from datetime import timedelta from enum import Enum from typing import Any, Dict, List, Optional @@ -20,11 +19,7 @@ @dataclass class DatabricksServiceExceptionWithDetailsProto: - """Serialization format for DatabricksServiceException with error details. This message doesn't - work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. Note the definition - of this message should be in sync with DatabricksServiceExceptionProto defined in - /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details - field defined.""" + """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None """@pbjson-skip""" @@ -174,24 +169,15 @@ class Operation: metadata: Optional[dict] = None """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such - metadata. Any method that returns a long-running operation should document the metadata type, if - any.""" + metadata.""" name: Optional[str] = None """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with - `operations/{unique_id}`. - - Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until - that support is added, `name` must be string without internal `/` separators.""" + `operations/{unique_id}`.""" response: Optional[dict] = None - """The normal, successful response of the operation. If the original method returns no data on - success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is - standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the - response should have the type `XxxResponse`, where `Xxx` is the original method name. For - example, if the original method name is `TakeSnapshot()`, the inferred response type is - `TakeSnapshotResponse`.""" + """The normal, successful response of the operation.""" def as_dict(self) -> dict: """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" @@ -380,13 +366,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None) -> TestResource: - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. 
:param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`TestResource` """ @@ -414,7 +400,7 @@ def poll_operation(): return test_resource, None - return poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server @@ -463,13 +449,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None): - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`Any /* MISSING TYPE */` """ @@ -495,7 +481,7 @@ def poll_operation(): return {}, None - poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server diff --git a/tests/generated/test_json_marshall.py b/tests/generated/test_json_marshall.py index 16fc6fb26..bf5460f2e 100755 --- a/tests/generated/test_json_marshall.py +++ b/tests/generated/test_json_marshall.py @@ -190,7 +190,7 @@ def _fieldmask(d: str) -> FieldMask: required_string="non_default_string", required_struct={}, required_timestamp=_timestamp("2023-12-31T23:59:59Z"), - required_value=json.loads("{}"), + required_value=json.loads('{"key": "value"}'), test_required_enum=TestEnum.TEST_ENUM_TWO, ), """{ @@ -198,6 +198,7 @@ def _fieldmask(d: str) -> FieldMask: "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, + "required_value": {"key": "value"}, "required_message": {}, "test_required_enum": "TEST_ENUM_TWO", "required_duration": "7200s",