diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index ad1e20e88..19009050a 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -27cebd58ae24e19c95c675db3a93b6046abaca2a \ No newline at end of file +59c4c0f3d5f0ef00cd5350b5674e941a7606d91a \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 45460b391..cedcf5313 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -16,3 +16,10 @@ ### Internal Changes ### API Changes +* Change `table_names` field for `databricks.sdk.service.jobs.TableUpdateTriggerConfiguration` to no longer be required. +* [Breaking] Change `table_names` field for `databricks.sdk.service.jobs.TableUpdateTriggerConfiguration` to no longer be required. +* [Breaking] Remove `batch_create_materialized_features()` method for [w.feature_engineering](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/ml/feature_engineering.html) workspace-level service. +* [Breaking] Remove `lineage_context` field for `databricks.sdk.service.ml.Feature`. +* [Breaking] Remove `autoscale_v2` enum value for `databricks.sdk.service.compute.EventDetailsCause`. +* [Breaking] Remove `unsupported_conversation_type_exception` enum value for `databricks.sdk.service.dashboards.MessageErrorType`. +* [Breaking] Remove `red_state` and `yellow_state` enum values for `databricks.sdk.service.vectorsearch.EndpointStatusState`. \ No newline at end of file diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index da0509a95..0d285ccda 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -1,8 +1,7 @@ # Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -import json import logging -from typing import List, Optional +from typing import Optional import databricks.sdk.core as client import databricks.sdk.dbutils as dbutils @@ -14,7 +13,6 @@ from databricks.sdk.mixins.jobs import JobsExt from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt from databricks.sdk.mixins.workspace import WorkspaceExt -from databricks.sdk.oauth import AuthorizationDetail from databricks.sdk.service import agentbricks as pkg_agentbricks from databricks.sdk.service import apps as pkg_apps from databricks.sdk.service import billing as pkg_billing @@ -220,8 +218,6 @@ def __init__( credentials_provider: Optional[CredentialsStrategy] = None, token_audience: Optional[str] = None, config: Optional[client.Config] = None, - scopes: Optional[List[str]] = None, - authorization_details: Optional[List[AuthorizationDetail]] = None, ): if not config: config = client.Config( @@ -250,12 +246,6 @@ def __init__( product=product, product_version=product_version, token_audience=token_audience, - scopes=" ".join(scopes) if scopes else None, - authorization_details=( - json.dumps([detail.as_dict() for detail in authorization_details]) - if authorization_details - else None - ), ) self._config = config.copy() self._dbutils = _make_dbutils(self._config) @@ -721,7 +711,7 @@ def permissions(self) -> pkg_iam.PermissionsAPI: @property def pipelines(self) -> pkg_pipelines.PipelinesAPI: - """The Lakeflow Spark Declarative Pipelines API allows you to create, edit, delete, start, and view details about pipelines.""" + """The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines.""" return self._pipelines @property diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index f22edec9b..c6d6c2dbc 100755 --- a/databricks/sdk/service/apps.py +++ 
b/databricks/sdk/service/apps.py @@ -1474,7 +1474,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ApplicationStatus: class ComputeSize(Enum): LARGE = "LARGE" - LIQUID = "LIQUID" MEDIUM = "MEDIUM" diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index a99c5405a..019012683 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -1750,7 +1750,6 @@ class ConnectionType(Enum): HTTP = "HTTP" MYSQL = "MYSQL" ORACLE = "ORACLE" - PALANTIR = "PALANTIR" POSTGRESQL = "POSTGRESQL" POWER_BI = "POWER_BI" REDSHIFT = "REDSHIFT" @@ -8787,7 +8786,6 @@ class SecurableKind(Enum): TABLE_FOREIGN_MYSQL = "TABLE_FOREIGN_MYSQL" TABLE_FOREIGN_NETSUITE = "TABLE_FOREIGN_NETSUITE" TABLE_FOREIGN_ORACLE = "TABLE_FOREIGN_ORACLE" - TABLE_FOREIGN_PALANTIR = "TABLE_FOREIGN_PALANTIR" TABLE_FOREIGN_POSTGRESQL = "TABLE_FOREIGN_POSTGRESQL" TABLE_FOREIGN_REDSHIFT = "TABLE_FOREIGN_REDSHIFT" TABLE_FOREIGN_SALESFORCE = "TABLE_FOREIGN_SALESFORCE" @@ -11210,8 +11208,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/catalogs", query=query, headers=headers) if "catalogs" in json: @@ -11406,8 +11402,6 @@ def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/connections", query=query, headers=headers) if "connections" in json: @@ -12320,8 +12314,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/external-locations", query=query, headers=headers) if "external_locations" in json: @@ -12700,8 +12692,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/functions", query=query, headers=headers) if "functions" in json: @@ -13061,8 +13051,6 @@ def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/metastores", query=query, headers=headers) if "metastores" in json: @@ -14904,8 +14892,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/schemas", query=query, headers=headers) if "schemas" in json: @@ -15134,8 +15120,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/storage-credentials", query=query, headers=headers) if "storage_credentials" in json: @@ -15395,8 +15379,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do( "GET", f"/api/2.1/unity-catalog/metastores/{metastore_id}/systemschemas", query=query, headers=headers @@ -15730,8 +15712,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/tables", query=query, headers=headers) if "tables" in json: @@ -16265,8 +16245,6 @@ def get_bindings( "Accept": "application/json", } - if "max_results" not 
in query: - query["max_results"] = 0 while True: json = self._api.do( "GET", diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index 64e6f4b0a..c4e9762bc 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -3363,7 +3363,6 @@ class EventDetailsCause(Enum): AUTORECOVERY = "AUTORECOVERY" AUTOSCALE = "AUTOSCALE" - AUTOSCALE_V2 = "AUTOSCALE_V2" REPLACE_BAD_NODES = "REPLACE_BAD_NODES" USER_REQUEST = "USER_REQUEST" @@ -7112,7 +7111,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -7191,8 +7189,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -7205,7 +7201,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 5bf772f27..cf31126e1 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -1161,7 +1161,6 @@ class MessageErrorType(Enum): INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION = "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION" INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION = "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION" INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION = "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" INVALID_CHAT_COMPLETION_JSON_EXCEPTION = "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" INVALID_COMPLETION_REQUEST_EXCEPTION = "INVALID_COMPLETION_REQUEST_EXCEPTION" INVALID_FUNCTION_CALL_EXCEPTION = "INVALID_FUNCTION_CALL_EXCEPTION" @@ -1189,7 +1188,6 @@ class MessageErrorType(Enum): TOO_MANY_TABLES_EXCEPTION = "TOO_MANY_TABLES_EXCEPTION" UNEXPECTED_REPLY_PROCESS_EXCEPTION = "UNEXPECTED_REPLY_PROCESS_EXCEPTION" UNKNOWN_AI_MODEL = "UNKNOWN_AI_MODEL" - UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION = "UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION" WAREHOUSE_ACCESS_MISSING_EXCEPTION = "WAREHOUSE_ACCESS_MISSING_EXCEPTION" WAREHOUSE_NOT_FOUND_EXCEPTION = "WAREHOUSE_NOT_FOUND_EXCEPTION" diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 1ca8e631c..5842aef8c 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -7243,10 +7243,6 @@ def from_dict(cls, d: Dict[str, Any]) -> 
TableTriggerState: @dataclass class TableUpdateTriggerConfiguration: - table_names: List[str] - """A list of tables to monitor for changes. The table name must be in the format - `catalog_name.schema_name.table_name`.""" - condition: Optional[Condition] = None """The table(s) condition based on which to trigger a job run.""" @@ -7254,6 +7250,10 @@ class TableUpdateTriggerConfiguration: """If set, the trigger starts a run only after the specified amount of time has passed since the last time the trigger fired. The minimum allowed value is 60 seconds.""" + table_names: Optional[List[str]] = None + """A list of tables to monitor for changes. The table name must be in the format + `catalog_name.schema_name.table_name`.""" + wait_after_last_change_seconds: Optional[int] = None """If set, the trigger starts a run only after no table updates have occurred for the specified time and can be used to wait for a series of table updates before triggering a run. The minimum diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 94fd823ca..b95021e3d 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -201,31 +201,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ApproveTransitionRequestResponse: return cls(activity=_from_dict(d, "activity", Activity)) -@dataclass -class BatchCreateMaterializedFeaturesResponse: - materialized_features: Optional[List[MaterializedFeature]] = None - """The created materialized features with assigned IDs.""" - - def as_dict(self) -> dict: - """Serializes the BatchCreateMaterializedFeaturesResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.materialized_features: - body["materialized_features"] = [v.as_dict() for v in self.materialized_features] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the BatchCreateMaterializedFeaturesResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.materialized_features: - body["materialized_features"] = self.materialized_features - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> BatchCreateMaterializedFeaturesResponse: - """Deserializes the BatchCreateMaterializedFeaturesResponse from a dictionary.""" - return cls(materialized_features=_repeated_dict(d, "materialized_features", MaterializedFeature)) - - class CommentActivityAction(Enum): """An action that a user (with sufficient permissions) could take on an activity or comment. 
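For illustration, a minimal sketch of constructing the reshuffled `TableUpdateTriggerConfiguration` above: with `table_names` now optional and moved below the other optional fields, arguments should be passed by keyword (the table name is a placeholder).

    from databricks.sdk.service import jobs

    # table_names is now Optional[List[str]] = None, so pass every field by keyword.
    table_update = jobs.TableUpdateTriggerConfiguration(
        table_names=["main.default.orders"],   # placeholder catalog.schema.table
        min_time_between_triggers_seconds=60,  # 60 seconds is the documented minimum
    )

Callers that previously relied on `table_names` being the first positional argument need this keyword form after the change.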
@@ -448,31 +423,6 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateLoggedModelResponse: return cls(model=_from_dict(d, "model", LoggedModel)) -@dataclass -class CreateMaterializedFeatureRequest: - materialized_feature: MaterializedFeature - """The materialized feature to create.""" - - def as_dict(self) -> dict: - """Serializes the CreateMaterializedFeatureRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.materialized_feature: - body["materialized_feature"] = self.materialized_feature.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CreateMaterializedFeatureRequest into a shallow dictionary of its immediate attributes.""" - body = {} - if self.materialized_feature: - body["materialized_feature"] = self.materialized_feature - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CreateMaterializedFeatureRequest: - """Deserializes the CreateMaterializedFeatureRequest from a dictionary.""" - return cls(materialized_feature=_from_dict(d, "materialized_feature", MaterializedFeature)) - - @dataclass class CreateModelResponse: registered_model: Optional[Model] = None @@ -1384,9 +1334,6 @@ class Feature: filter_condition: Optional[str] = None """The filter condition applied to the source data before aggregation.""" - lineage_context: Optional[LineageContext] = None - """Lineage context information for this feature.""" - def as_dict(self) -> dict: """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1400,8 +1347,6 @@ def as_dict(self) -> dict: body["function"] = self.function.as_dict() if self.inputs: body["inputs"] = [v for v in self.inputs] - if self.lineage_context: - body["lineage_context"] = self.lineage_context.as_dict() if self.source: body["source"] = self.source.as_dict() if self.time_window: @@ -1421,8 +1366,6 @@ def as_shallow_dict(self) -> dict: body["function"] = self.function if self.inputs: body["inputs"] = self.inputs - if self.lineage_context: - body["lineage_context"] = self.lineage_context if self.source: body["source"] = self.source if self.time_window: @@ -1438,7 +1381,6 @@ def from_dict(cls, d: Dict[str, Any]) -> Feature: full_name=d.get("full_name", None), function=_from_dict(d, "function", Function), inputs=d.get("inputs", None), - lineage_context=_from_dict(d, "lineage_context", LineageContext), source=_from_dict(d, "source", DataSource), time_window=_from_dict(d, "time_window", TimeWindow), ) @@ -2250,38 +2192,6 @@ def from_dict(cls, d: Dict[str, Any]) -> InputTag: return cls(key=d.get("key", None), value=d.get("value", None)) -@dataclass -class JobContext: - job_id: Optional[int] = None - """The job ID where this API invoked.""" - - job_run_id: Optional[int] = None - """The job run ID where this API was invoked.""" - - def as_dict(self) -> dict: - """Serializes the JobContext into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.job_id is not None: - body["job_id"] = self.job_id - if self.job_run_id is not None: - body["job_run_id"] = self.job_run_id - return body - - def as_shallow_dict(self) -> dict: - """Serializes the JobContext into a shallow dictionary of its immediate attributes.""" - body = {} - if self.job_id is not None: - body["job_id"] = self.job_id - if self.job_run_id is not None: - body["job_run_id"] = self.job_run_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> JobContext: - """Deserializes the JobContext from a dictionary.""" - return 
cls(job_id=d.get("job_id", None), job_run_id=d.get("job_run_id", None)) - - @dataclass class JobSpec: job_id: str @@ -2359,42 +2269,6 @@ def from_dict(cls, d: Dict[str, Any]) -> JobSpecWithoutSecret: return cls(job_id=d.get("job_id", None), workspace_url=d.get("workspace_url", None)) -@dataclass -class LineageContext: - """Lineage context information for tracking where an API was invoked. This will allow us to track - lineage, which currently uses caller entity information for use across the Lineage Client and - Observability in Lumberjack.""" - - job_context: Optional[JobContext] = None - """Job context information including job ID and run ID.""" - - notebook_id: Optional[int] = None - """The notebook ID where this API was invoked.""" - - def as_dict(self) -> dict: - """Serializes the LineageContext into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.job_context: - body["job_context"] = self.job_context.as_dict() - if self.notebook_id is not None: - body["notebook_id"] = self.notebook_id - return body - - def as_shallow_dict(self) -> dict: - """Serializes the LineageContext into a shallow dictionary of its immediate attributes.""" - body = {} - if self.job_context: - body["job_context"] = self.job_context - if self.notebook_id is not None: - body["notebook_id"] = self.notebook_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> LineageContext: - """Deserializes the LineageContext from a dictionary.""" - return cls(job_context=_from_dict(d, "job_context", JobContext), notebook_id=d.get("notebook_id", None)) - - @dataclass class LinkedFeature: """Feature for model version. ([ML-57150] Renamed from Feature to LinkedFeature)""" @@ -6970,30 +6844,6 @@ class FeatureEngineeringAPI: def __init__(self, api_client): self._api = api_client - def batch_create_materialized_features( - self, requests: List[CreateMaterializedFeatureRequest] - ) -> BatchCreateMaterializedFeaturesResponse: - """Batch create materialized features. - - :param requests: List[:class:`CreateMaterializedFeatureRequest`] - The requests to create materialized features. - - :returns: :class:`BatchCreateMaterializedFeaturesResponse` - """ - - body = {} - if requests is not None: - body["requests"] = [v.as_dict() for v in requests] - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do( - "POST", "/api/2.0/feature-engineering/materialized-features:batchCreate", body=body, headers=headers - ) - return BatchCreateMaterializedFeaturesResponse.from_dict(res) - def create_feature(self, feature: Feature) -> Feature: """Create a Feature. diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 9ab410419..5b1fe193f 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -556,8 +556,8 @@ class IngestionGatewayPipelineDefinition: gateway_storage_name: Optional[str] = None """Optional. The Unity Catalog-compatible name for the gateway storage location. This is the - destination to use for the data that is extracted by the gateway. Spark Declarative Pipelines - system will automatically create the storage location under the catalog and schema.""" + destination to use for the data that is extracted by the gateway. 
Delta Live Tables system will + automatically create the storage location under the catalog and schema.""" def as_dict(self) -> dict: """Serializes the IngestionGatewayPipelineDefinition into a dictionary suitable for use as a JSON request body.""" @@ -828,31 +828,19 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinitionWorkdayRepor class IngestionSourceType(Enum): BIGQUERY = "BIGQUERY" - CONFLUENCE = "CONFLUENCE" DYNAMICS365 = "DYNAMICS365" - FOREIGN_CATALOG = "FOREIGN_CATALOG" GA4_RAW_DATA = "GA4_RAW_DATA" - GOOGLE_ADS = "GOOGLE_ADS" - GUIDEWIRE = "GUIDEWIRE" - HUBSPOT = "HUBSPOT" MANAGED_POSTGRESQL = "MANAGED_POSTGRESQL" - META_MARKETING = "META_MARKETING" MYSQL = "MYSQL" NETSUITE = "NETSUITE" ORACLE = "ORACLE" POSTGRESQL = "POSTGRESQL" - REDSHIFT = "REDSHIFT" SALESFORCE = "SALESFORCE" - SALESFORCE_MARKETING_CLOUD = "SALESFORCE_MARKETING_CLOUD" SERVICENOW = "SERVICENOW" SHAREPOINT = "SHAREPOINT" - SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" TERADATA = "TERADATA" - TIKTOK_ADS = "TIKTOK_ADS" - WORKDAY_HCM = "WORKDAY_HCM" WORKDAY_RAAS = "WORKDAY_RAAS" - ZENDESK = "ZENDESK" @dataclass @@ -2979,8 +2967,8 @@ class TableSpecificConfig: """The SCD type to use to ingest the table.""" sequence_by: Optional[List[str]] = None - """The column names specifying the logical order of events in the source data. Spark Declarative - Pipelines uses this sequencing to handle change events that arrive out of order.""" + """The column names specifying the logical order of events in the source data. Delta Live Tables + uses this sequencing to handle change events that arrive out of order.""" workday_report_parameters: Optional[IngestionPipelineDefinitionWorkdayReportParameters] = None """(Optional) Additional custom parameters for Workday Report""" @@ -3254,17 +3242,16 @@ class UpdateStateInfoState(Enum): class PipelinesAPI: - """The Lakeflow Spark Declarative Pipelines API allows you to create, edit, delete, start, and view details - about pipelines. + """The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. - Spark Declarative Pipelines is a framework for building reliable, maintainable, and testable data - processing pipelines. You define the transformations to perform on your data, and Spark Declarative - Pipelines manages task orchestration, cluster management, monitoring, data quality, and error handling. + Delta Live Tables is a framework for building reliable, maintainable, and testable data processing + pipelines. You define the transformations to perform on your data, and Delta Live Tables manages task + orchestration, cluster management, monitoring, data quality, and error handling. - Instead of defining your data pipelines using a series of separate Apache Spark tasks, Spark Declarative - Pipelines manages how your data is transformed based on a target schema you define for each processing - step. You can also enforce data quality with Spark Declarative Pipelines expectations. Expectations allow - you to define expected data quality and specify how to handle records that fail those expectations.""" + Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta Live Tables + manages how your data is transformed based on a target schema you define for each processing step. You can + also enforce data quality with Delta Live Tables expectations. 
Expectations allow you to define expected + data quality and specify how to handle records that fail those expectations.""" def __init__(self, api_client): self._api = api_client @@ -3627,7 +3614,7 @@ def list_pipelines( order_by: Optional[List[str]] = None, page_token: Optional[str] = None, ) -> Iterator[PipelineStateInfo]: - """Lists pipelines defined in the Spark Declarative Pipelines system. + """Lists pipelines defined in the Delta Live Tables system. :param filter: str (optional) Select a subset of results based on the specified criteria. The supported filters are: diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index c6126a23e..23a14ed2d 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -4165,8 +4165,6 @@ def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): - CREATE_FAILED = "CREATE_FAILED" - CREATING = "CREATING" DISCONNECTED = "DISCONNECTED" ESTABLISHED = "ESTABLISHED" EXPIRED = "EXPIRED" diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 2b839f687..bff12cd1b 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -6302,7 +6302,6 @@ class TerminationReasonCode(Enum): DOCKER_IMAGE_PULL_FAILURE = "DOCKER_IMAGE_PULL_FAILURE" DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION = "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION" DOCKER_INVALID_OS_EXCEPTION = "DOCKER_INVALID_OS_EXCEPTION" - DRIVER_DNS_RESOLUTION_FAILURE = "DRIVER_DNS_RESOLUTION_FAILURE" DRIVER_EVICTION = "DRIVER_EVICTION" DRIVER_LAUNCH_TIMEOUT = "DRIVER_LAUNCH_TIMEOUT" DRIVER_NODE_UNREACHABLE = "DRIVER_NODE_UNREACHABLE" @@ -6381,8 +6380,6 @@ class TerminationReasonCode(Enum): NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG = "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG" NETWORK_CONFIGURATION_FAILURE = "NETWORK_CONFIGURATION_FAILURE" NFS_MOUNT_FAILURE = "NFS_MOUNT_FAILURE" - NO_ACTIVATED_K8S = "NO_ACTIVATED_K8S" - NO_ACTIVATED_K8S_TESTING_TAG = "NO_ACTIVATED_K8S_TESTING_TAG" NO_MATCHED_K8S = "NO_MATCHED_K8S" NO_MATCHED_K8S_TESTING_TAG = "NO_MATCHED_K8S_TESTING_TAG" NPIP_TUNNEL_SETUP_FAILURE = "NPIP_TUNNEL_SETUP_FAILURE" @@ -6395,7 +6392,6 @@ class TerminationReasonCode(Enum): SECRET_CREATION_FAILURE = "SECRET_CREATION_FAILURE" SECRET_PERMISSION_DENIED = "SECRET_PERMISSION_DENIED" SECRET_RESOLUTION_ERROR = "SECRET_RESOLUTION_ERROR" - SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION = "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" SECURITY_DAEMON_REGISTRATION_EXCEPTION = "SECURITY_DAEMON_REGISTRATION_EXCEPTION" SELF_BOOTSTRAP_FAILURE = "SELF_BOOTSTRAP_FAILURE" SERVERLESS_LONG_RUNNING_TERMINATED = "SERVERLESS_LONG_RUNNING_TERMINATED" diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index a0b731ffa..370ce78d4 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -598,8 +598,6 @@ class EndpointStatusState(Enum): OFFLINE = "OFFLINE" ONLINE = "ONLINE" PROVISIONING = "PROVISIONING" - RED_STATE = "RED_STATE" - YELLOW_STATE = "YELLOW_STATE" class EndpointType(Enum): @@ -1758,7 +1756,7 @@ def query_index( :param query_text: str (optional) Query text. Required for Delta Sync Index using model endpoint. :param query_type: str (optional) - The query type to use. Choices are `ANN` and `HYBRID` and `FULL_TEXT`. Defaults to `ANN`. + The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. 
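As a usage sketch for the narrowed `query_type` documentation above, assuming a placeholder index and column names; only `ANN` (the default) and `HYBRID` are listed after this change.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # HYBRID combines vector similarity with keyword matching; ANN remains the default.
    results = w.vector_search_indexes.query_index(
        index_name="main.default.my_index",  # placeholder index name
        columns=["id", "text"],              # placeholder columns to return
        query_text="databricks",
        query_type="HYBRID",
        num_results=5,
    )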
:param query_vector: List[float] (optional) Query vector. Required for Direct Vector Access Index and Delta Sync Index using self-managed vectors. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 2a8043172..ca78b86df 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -43,9 +43,9 @@ a = AccountClient() - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - all = a.workspace_assignment.list(workspace_id=workspace_id) + all = a.workspace_assignment.list(list=workspace_id) Get the permission assignments for the specified Databricks account and Databricks workspace. diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index b71c1707e..d63648d58 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - creds = a.credentials.create( + role = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=creds.credentials_id) + a.credentials.delete(credentials_id=role.credentials_id) Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 41a04deb3..25ee5abaa 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -23,13 +23,10 @@ a = AccountClient() - bucket = a.storage.create( + storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) - - # cleanup - a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) Creates a Databricks storage configuration for an account. diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 320c875e1..2bc765a2a 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -329,9 +329,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: LARGE :value: "LARGE" - .. py:attribute:: LIQUID - :value: "LIQUID" - .. py:attribute:: MEDIUM :value: "MEDIUM" diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 44209d4b9..f0c8a1af4 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -305,9 +305,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: ORACLE :value: "ORACLE" - .. py:attribute:: PALANTIR - :value: "PALANTIR" - .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" @@ -1604,9 +1601,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TABLE_FOREIGN_ORACLE :value: "TABLE_FOREIGN_ORACLE" - .. py:attribute:: TABLE_FOREIGN_PALANTIR - :value: "TABLE_FOREIGN_PALANTIR" - .. 
py:attribute:: TABLE_FOREIGN_POSTGRESQL :value: "TABLE_FOREIGN_POSTGRESQL" diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index c6064252a..60e580268 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -421,9 +421,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: AUTOSCALE :value: "AUTOSCALE" - .. py:attribute:: AUTOSCALE_V2 - :value: "AUTOSCALE_V2" - .. py:attribute:: REPLACE_BAD_NODES :value: "REPLACE_BAD_NODES" @@ -1216,9 +1213,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1441,12 +1435,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1483,9 +1471,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index df004c847..75d229633 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -214,9 +214,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION :value: "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION - :value: "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" - .. py:attribute:: INVALID_CHAT_COMPLETION_JSON_EXCEPTION :value: "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" @@ -298,9 +295,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UNKNOWN_AI_MODEL :value: "UNKNOWN_AI_MODEL" - .. py:attribute:: UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION - :value: "UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION" - .. py:attribute:: WAREHOUSE_ACCESS_MISSING_EXCEPTION :value: "WAREHOUSE_ACCESS_MISSING_EXCEPTION" diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 844e66245..1d8fba9ef 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -66,10 +66,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: BatchCreateMaterializedFeaturesResponse - :members: - :undoc-members: - .. py:class:: CommentActivityAction An action that a user (with sufficient permissions) could take on an activity or comment. @@ -118,10 +114,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateMaterializedFeatureRequest - :members: - :undoc-members: - .. 
autoclass:: CreateModelResponse :members: :undoc-members: @@ -413,10 +405,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: JobContext - :members: - :undoc-members: - .. autoclass:: JobSpec :members: :undoc-members: @@ -425,10 +413,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: LineageContext - :members: - :undoc-members: - .. autoclass:: LinkedFeature :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index cc2f14411..fbbf456ce 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -139,33 +139,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BIGQUERY :value: "BIGQUERY" - .. py:attribute:: CONFLUENCE - :value: "CONFLUENCE" - .. py:attribute:: DYNAMICS365 :value: "DYNAMICS365" - .. py:attribute:: FOREIGN_CATALOG - :value: "FOREIGN_CATALOG" - .. py:attribute:: GA4_RAW_DATA :value: "GA4_RAW_DATA" - .. py:attribute:: GOOGLE_ADS - :value: "GOOGLE_ADS" - - .. py:attribute:: GUIDEWIRE - :value: "GUIDEWIRE" - - .. py:attribute:: HUBSPOT - :value: "HUBSPOT" - .. py:attribute:: MANAGED_POSTGRESQL :value: "MANAGED_POSTGRESQL" - .. py:attribute:: META_MARKETING - :value: "META_MARKETING" - .. py:attribute:: MYSQL :value: "MYSQL" @@ -178,42 +160,24 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" - .. py:attribute:: REDSHIFT - :value: "REDSHIFT" - .. py:attribute:: SALESFORCE :value: "SALESFORCE" - .. py:attribute:: SALESFORCE_MARKETING_CLOUD - :value: "SALESFORCE_MARKETING_CLOUD" - .. py:attribute:: SERVICENOW :value: "SERVICENOW" .. py:attribute:: SHAREPOINT :value: "SHAREPOINT" - .. py:attribute:: SQLDW - :value: "SQLDW" - .. py:attribute:: SQLSERVER :value: "SQLSERVER" .. py:attribute:: TERADATA :value: "TERADATA" - .. py:attribute:: TIKTOK_ADS - :value: "TIKTOK_ADS" - - .. py:attribute:: WORKDAY_HCM - :value: "WORKDAY_HCM" - .. py:attribute:: WORKDAY_RAAS :value: "WORKDAY_RAAS" - .. py:attribute:: ZENDESK - :value: "ZENDESK" - .. autoclass:: ListPipelineEventsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index b47a84770..c010a862a 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -641,12 +641,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: NccPrivateEndpointRulePrivateLinkConnectionState - .. py:attribute:: CREATE_FAILED - :value: "CREATE_FAILED" - - .. py:attribute:: CREATING - :value: "CREATING" - .. py:attribute:: DISCONNECTED :value: "DISCONNECTED" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 865aba6c5..66f6340c9 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -1296,9 +1296,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DOCKER_INVALID_OS_EXCEPTION :value: "DOCKER_INVALID_OS_EXCEPTION" - .. py:attribute:: DRIVER_DNS_RESOLUTION_FAILURE - :value: "DRIVER_DNS_RESOLUTION_FAILURE" - .. py:attribute:: DRIVER_EVICTION :value: "DRIVER_EVICTION" @@ -1521,12 +1518,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NFS_MOUNT_FAILURE :value: "NFS_MOUNT_FAILURE" - .. 
py:attribute:: NO_ACTIVATED_K8S - :value: "NO_ACTIVATED_K8S" - - .. py:attribute:: NO_ACTIVATED_K8S_TESTING_TAG - :value: "NO_ACTIVATED_K8S_TESTING_TAG" - .. py:attribute:: NO_MATCHED_K8S :value: "NO_MATCHED_K8S" @@ -1563,9 +1554,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" - .. py:attribute:: SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION - :value: "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION" - .. py:attribute:: SECURITY_DAEMON_REGISTRATION_EXCEPTION :value: "SECURITY_DAEMON_REGISTRATION_EXCEPTION" diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index b8bd46536..b6250cc64 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -80,12 +80,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PROVISIONING :value: "PROVISIONING" - .. py:attribute:: RED_STATE - :value: "RED_STATE" - - .. py:attribute:: YELLOW_STATE - :value: "YELLOW_STATE" - .. py:class:: EndpointType Type of endpoint. diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 17297d8dd..258f994d3 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -155,12 +155,13 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() created = w.catalogs.create(name=f"sdk-{time.time_ns()}") - _ = w.catalogs.update(name=created.name, comment="updated") + _ = w.catalogs.update(name=created.name, isolation_mode=catalog.CatalogIsolationMode.ISOLATED) # cleanup w.catalogs.delete(name=created.name, force=True) diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 612800956..fdf69e38a 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -140,10 +140,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list() + all = w.external_locations.list(catalog.ListExternalLocationsRequest()) Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on @@ -190,24 +191,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Updates an external location in the metastore. 
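For context on the removed `max_results = 0` defaults in the Unity Catalog list methods earlier in this diff, a minimal sketch of the calling pattern, which is unchanged for SDK users: the iterator still follows the page token internally, and `max_results` is only sent when the caller passes it explicitly.

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Pagination is handled inside the generator; no max_results value is forced onto the request.
    for catalog_info in w.catalogs.list():
        print(catalog_info.name)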
The caller must be the owner of the external location, or be a metastore admin. In the second case, the admin can only update the name of the external diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index d8111141e..2eacfda5e 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - credential = w.storage_credentials.create( + created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=credential.name) + w.storage_credentials.delete(delete=created.name) Creates a new storage credential. diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index b33bef940..8de553fc2 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -156,7 +156,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) + summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index b2390ce63..2f95213e2 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me = w.current_user.me() + me2 = w.current_user.me() Get details about the current method caller's identity. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 0b82986de..59a50c294 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -522,37 +522,11 @@ .. code-block:: - import os - import time - from databricks.sdk import WorkspaceClient - from databricks.sdk.service import jobs w = WorkspaceClient() - notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - - cluster_id = ( - w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] - ) - - created_job = w.jobs.create( - name=f"sdk-{time.time_ns()}", - tasks=[ - jobs.Task( - description="test", - existing_cluster_id=cluster_id, - notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key="test", - timeout_seconds=0, - ) - ], - ) - - run_list = w.jobs.list_runs(job_id=created_job.job_id) - - # cleanup - w.jobs.delete(job_id=created_job.job_id) + job_list = w.jobs.list(expand_tasks=False) List jobs. diff --git a/docs/workspace/ml/feature_engineering.rst b/docs/workspace/ml/feature_engineering.rst index 57c99a11c..0ec7cc8bf 100644 --- a/docs/workspace/ml/feature_engineering.rst +++ b/docs/workspace/ml/feature_engineering.rst @@ -6,16 +6,6 @@ [description] - .. py:method:: batch_create_materialized_features(requests: List[CreateMaterializedFeatureRequest]) -> BatchCreateMaterializedFeaturesResponse - - Batch create materialized features. - - :param requests: List[:class:`CreateMaterializedFeatureRequest`] - The requests to create materialized features. - - :returns: :class:`BatchCreateMaterializedFeaturesResponse` - - .. py:method:: create_feature(feature: Feature) -> Feature Create a Feature. 
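With `batch_create_materialized_features()` removed, a hedged sketch of the remaining one-at-a-time path through `create_feature`; which `Feature` fields are required is not spelled out in this diff, so the list below is left empty rather than guessing at a payload.

    from typing import List

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import ml

    w = WorkspaceClient()

    # Populate with fully specified ml.Feature objects (full_name, source, function, ...).
    features: List[ml.Feature] = []
    for feature in features:
        created = w.feature_engineering.create_feature(feature=feature)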
diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 98d803a63..601ffd87d 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -90,7 +90,9 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Creates a new registered model with the name specified in the request body. Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. @@ -734,13 +736,14 @@ w = WorkspaceClient() - created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - model = w.model_registry.get_model(name=created.registered_model.name) + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") - w.model_registry.update_model( - name=model.registered_model_databricks.name, + w.model_registry.update_model_version( description=f"sdk-{time.time_ns()}", + name=created.model_version.name, + version=created.model_version.version, ) Updates a registered model. diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index b0bada615..502061df3 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -4,17 +4,16 @@ .. py:class:: PipelinesAPI - The Lakeflow Spark Declarative Pipelines API allows you to create, edit, delete, start, and view details - about pipelines. + The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. - Spark Declarative Pipelines is a framework for building reliable, maintainable, and testable data - processing pipelines. You define the transformations to perform on your data, and Spark Declarative - Pipelines manages task orchestration, cluster management, monitoring, data quality, and error handling. + Delta Live Tables is a framework for building reliable, maintainable, and testable data processing + pipelines. You define the transformations to perform on your data, and Delta Live Tables manages task + orchestration, cluster management, monitoring, data quality, and error handling. - Instead of defining your data pipelines using a series of separate Apache Spark tasks, Spark Declarative - Pipelines manages how your data is transformed based on a target schema you define for each processing - step. You can also enforce data quality with Spark Declarative Pipelines expectations. Expectations allow - you to define expected data quality and specify how to handle records that fail those expectations. + Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta Live Tables + manages how your data is transformed based on a target schema you define for each processing step. You can + also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected + data quality and specify how to handle records that fail those expectations. .. 
py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], environment: Optional[PipelinesEnvironment], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], tags: Optional[Dict[str, str]], target: Optional[str], trigger: Optional[PipelineTrigger], usage_policy_id: Optional[str]]) -> CreatePipelineResponse @@ -292,7 +291,7 @@ all = w.pipelines.list_pipelines(pipelines.ListPipelinesRequest()) - Lists pipelines defined in the Spark Declarative Pipelines system. + Lists pipelines defined in the Delta Live Tables system. :param filter: str (optional) Select a subset of results based on the specified criteria. The supported filters are: diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 0dfb63fbf..f0081b3f2 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -29,7 +29,7 @@ display_name=f"sdk-{time.time_ns()}", warehouse_id=srcs[0].warehouse_id, description="test query from Go SDK", - query_text="SELECT 1", + query_text="SHOW TABLES", ) ) diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst index c89f3579c..11417c9da 100644 --- a/docs/workspace/vectorsearch/vector_search_indexes.rst +++ b/docs/workspace/vectorsearch/vector_search_indexes.rst @@ -102,7 +102,7 @@ :param query_text: str (optional) Query text. Required for Delta Sync Index using model endpoint. :param query_type: str (optional) - The query type to use. Choices are `ANN` and `HYBRID` and `FULL_TEXT`. Defaults to `ANN`. + The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. :param query_vector: List[float] (optional) Query vector. Required for Direct Vector Access Index and Delta Sync Index using self-managed vectors. diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index e1b7d12b9..75b2457dd 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -79,7 +79,7 @@ notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - export_response = w.workspace.export(format=workspace.ExportFormat.SOURCE, path=notebook) + export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook) Exports an object or the contents of an entire directory. 
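A short follow-on to the `export_` example above, assuming the standard base64-encoded `content` field on the export response (not something this diff changes); the notebook path is a placeholder.

    import base64

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import workspace

    w = WorkspaceClient()

    notebook = f"/Users/{w.current_user.me().user_name}/my-notebook"  # placeholder path
    export_response = w.workspace.export_(format=workspace.ExportFormat.SOURCE, path=notebook)

    # The exported source arrives base64-encoded in the `content` field.
    source = base64.b64decode(export_response.content).decode("utf-8")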
diff --git a/tests/databricks/sdk/service/lrotesting.py b/tests/databricks/sdk/service/lrotesting.py index 679118220..c06822e8f 100755 --- a/tests/databricks/sdk/service/lrotesting.py +++ b/tests/databricks/sdk/service/lrotesting.py @@ -4,7 +4,6 @@ import logging from dataclasses import dataclass -from datetime import timedelta from enum import Enum from typing import Any, Dict, List, Optional @@ -20,11 +19,7 @@ @dataclass class DatabricksServiceExceptionWithDetailsProto: - """Serialization format for DatabricksServiceException with error details. This message doesn't - work for ScalaPB-04 as google.protobuf.Any is only available to ScalaPB-09. Note the definition - of this message should be in sync with DatabricksServiceExceptionProto defined in - /api-base/proto/legacy/databricks.proto except the later one doesn't have the error details - field defined.""" + """Databricks Error that is returned by all Databricks APIs.""" details: Optional[List[dict]] = None """@pbjson-skip""" @@ -174,24 +169,15 @@ class Operation: metadata: Optional[dict] = None """Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such - metadata. Any method that returns a long-running operation should document the metadata type, if - any.""" + metadata.""" name: Optional[str] = None """The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with - `operations/{unique_id}`. - - Note: multi-segment resource names are not yet supported in the RPC framework and SDK/TF. Until - that support is added, `name` must be string without internal `/` separators.""" + `operations/{unique_id}`.""" response: Optional[dict] = None - """The normal, successful response of the operation. If the original method returns no data on - success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is - standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the - response should have the type `XxxResponse`, where `Xxx` is the original method name. For - example, if the original method name is `TakeSnapshot()`, the inferred response type is - `TakeSnapshotResponse`.""" + """The normal, successful response of the operation.""" def as_dict(self) -> dict: """Serializes the Operation into a dictionary suitable for use as a JSON request body.""" @@ -380,13 +366,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None) -> TestResource: - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. 
:param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`TestResource` """ @@ -414,7 +400,7 @@ def poll_operation(): return test_resource, None - return poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + return poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server @@ -463,13 +449,13 @@ def __init__(self, impl: LroTestingAPI, operation: Operation): self._operation = operation def wait(self, opts: Optional[lro.LroOptions] = None): - """Wait blocks until the long-running operation is completed with default 20 min - timeout. If the operation didn't finish within the timeout, this function will - raise an error of type TimeoutError, otherwise returns successful response and - any errors encountered. + """Wait blocks until the long-running operation is completed. If no timeout is + specified, this will poll indefinitely. If a timeout is provided and the operation + didn't finish within the timeout, this function will raise an error of type + TimeoutError, otherwise returns successful response and any errors encountered. :param opts: :class:`LroOptions` - Timeout options (default: 20 minutes) + Timeout options (default: polls indefinitely) :returns: :class:`Any /* MISSING TYPE */` """ @@ -495,7 +481,7 @@ def poll_operation(): return {}, None - poll(poll_operation, timeout=opts.timeout if opts is not None else timedelta(minutes=20)) + poll(poll_operation, timeout=opts.timeout if opts is not None else None) def cancel(self): """Starts asynchronous cancellation on a long-running operation. The server diff --git a/tests/generated/test_json_marshall.py b/tests/generated/test_json_marshall.py index 16fc6fb26..bf5460f2e 100755 --- a/tests/generated/test_json_marshall.py +++ b/tests/generated/test_json_marshall.py @@ -190,7 +190,7 @@ def _fieldmask(d: str) -> FieldMask: required_string="non_default_string", required_struct={}, required_timestamp=_timestamp("2023-12-31T23:59:59Z"), - required_value=json.loads("{}"), + required_value=json.loads('{"key": "value"}'), test_required_enum=TestEnum.TEST_ENUM_TWO, ), """{ @@ -198,6 +198,7 @@ def _fieldmask(d: str) -> FieldMask: "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, + "required_value": {"key": "value"}, "required_message": {}, "test_required_enum": "TEST_ENUM_TWO", "required_duration": "7200s",
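Since `wait()` now polls indefinitely when no timeout is given, a hedged sketch of restoring a bounded wait by passing the timeout explicitly; the `waiter` variable and the `lro` import path are assumptions for illustration, not part of this diff.

    from datetime import timedelta

    from databricks.sdk import lro  # assumed import path for LroOptions

    # `waiter` stands in for an operation waiter such as TestResourceOperationWaiter.
    result = waiter.wait(opts=lro.LroOptions(timeout=timedelta(minutes=20)))  # old default was 20 min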