diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e9f9e0a0e..7f9f41bb8 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -248f4ad9668661da9d0bf4a7b0119a2d44fd1e75 \ No newline at end of file +bc17b474818138f19b78a7bea0675707dead2b87 \ No newline at end of file diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 617f2cee2..848272198 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -71,6 +71,7 @@ CspEnablementAccountAPI, DefaultNamespaceAPI, DisableLegacyAccessAPI, + DisableLegacyDbfsAPI, DisableLegacyFeaturesAPI, EnhancedSecurityMonitoringAPI, EsmEnablementAccountAPI, diff --git a/databricks/sdk/service/apps.py b/databricks/sdk/service/apps.py index 9cafe235e..63bc981ba 100755 --- a/databricks/sdk/service/apps.py +++ b/databricks/sdk/service/apps.py @@ -25,7 +25,8 @@ class App: It must be unique within the workspace.""" active_deployment: Optional[AppDeployment] = None - """The active deployment of the app.""" + """The active deployment of the app. A deployment is considered active when it has been deployed to + the app compute.""" app_status: Optional[ApplicationStatus] = None @@ -37,11 +38,19 @@ class App: creator: Optional[str] = None """The email of the user that created the app.""" + default_source_code_path: Optional[str] = None + """The default workspace file system path of the source code from which app deployment are created. + This field tracks the workspace source code path of the last active deployment.""" + description: Optional[str] = None """The description of the app.""" pending_deployment: Optional[AppDeployment] = None - """The pending deployment of the app.""" + """The pending deployment of the app. 
A deployment is considered pending when it is being prepared + for deployment to the app compute.""" + + resources: Optional[List[AppResource]] = None + """Resources for the app.""" service_principal_id: Optional[int] = None @@ -64,9 +73,12 @@ def as_dict(self) -> dict: if self.compute_status: body['compute_status'] = self.compute_status.as_dict() if self.create_time is not None: body['create_time'] = self.create_time if self.creator is not None: body['creator'] = self.creator + if self.default_source_code_path is not None: + body['default_source_code_path'] = self.default_source_code_path if self.description is not None: body['description'] = self.description if self.name is not None: body['name'] = self.name if self.pending_deployment: body['pending_deployment'] = self.pending_deployment.as_dict() + if self.resources: body['resources'] = [v.as_dict() for v in self.resources] if self.service_principal_id is not None: body['service_principal_id'] = self.service_principal_id if self.service_principal_name is not None: body['service_principal_name'] = self.service_principal_name @@ -83,9 +95,11 @@ def from_dict(cls, d: Dict[str, any]) -> App: compute_status=_from_dict(d, 'compute_status', ComputeStatus), create_time=d.get('create_time', None), creator=d.get('creator', None), + default_source_code_path=d.get('default_source_code_path', None), description=d.get('description', None), name=d.get('name', None), pending_deployment=_from_dict(d, 'pending_deployment', AppDeployment), + resources=_repeated_dict(d, 'resources', AppResource), service_principal_id=d.get('service_principal_id', None), service_principal_name=d.get('service_principal_name', None), update_time=d.get('update_time', None), @@ -372,6 +386,170 @@ def from_dict(cls, d: Dict[str, any]) -> AppPermissionsRequest: app_name=d.get('app_name', None)) +@dataclass +class AppResource: + name: str + """Name of the App Resource.""" + + description: Optional[str] = None + """Description of the App Resource.""" + + 
job: Optional[AppResourceJob] = None + + secret: Optional[AppResourceSecret] = None + + serving_endpoint: Optional[AppResourceServingEndpoint] = None + + sql_warehouse: Optional[AppResourceSqlWarehouse] = None + + def as_dict(self) -> dict: + """Serializes the AppResource into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.description is not None: body['description'] = self.description + if self.job: body['job'] = self.job.as_dict() + if self.name is not None: body['name'] = self.name + if self.secret: body['secret'] = self.secret.as_dict() + if self.serving_endpoint: body['serving_endpoint'] = self.serving_endpoint.as_dict() + if self.sql_warehouse: body['sql_warehouse'] = self.sql_warehouse.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AppResource: + """Deserializes the AppResource from a dictionary.""" + return cls(description=d.get('description', None), + job=_from_dict(d, 'job', AppResourceJob), + name=d.get('name', None), + secret=_from_dict(d, 'secret', AppResourceSecret), + serving_endpoint=_from_dict(d, 'serving_endpoint', AppResourceServingEndpoint), + sql_warehouse=_from_dict(d, 'sql_warehouse', AppResourceSqlWarehouse)) + + +@dataclass +class AppResourceJob: + id: str + """Id of the job to grant permission on.""" + + permission: AppResourceJobJobPermission + """Permissions to grant on the Job. 
Supported permissions are: "CAN_MANAGE", "IS_OWNER", + "CAN_MANAGE_RUN", "CAN_VIEW".""" + + def as_dict(self) -> dict: + """Serializes the AppResourceJob into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.id is not None: body['id'] = self.id + if self.permission is not None: body['permission'] = self.permission.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AppResourceJob: + """Deserializes the AppResourceJob from a dictionary.""" + return cls(id=d.get('id', None), permission=_enum(d, 'permission', AppResourceJobJobPermission)) + + +class AppResourceJobJobPermission(Enum): + + CAN_MANAGE = 'CAN_MANAGE' + CAN_MANAGE_RUN = 'CAN_MANAGE_RUN' + CAN_VIEW = 'CAN_VIEW' + IS_OWNER = 'IS_OWNER' + + +@dataclass +class AppResourceSecret: + scope: str + """Scope of the secret to grant permission on.""" + + key: str + """Key of the secret to grant permission on.""" + + permission: AppResourceSecretSecretPermission + """Permission to grant on the secret scope. For secrets, only one permission is allowed. Permission + must be one of: "READ", "WRITE", "MANAGE".""" + + def as_dict(self) -> dict: + """Serializes the AppResourceSecret into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.key is not None: body['key'] = self.key + if self.permission is not None: body['permission'] = self.permission.value + if self.scope is not None: body['scope'] = self.scope + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AppResourceSecret: + """Deserializes the AppResourceSecret from a dictionary.""" + return cls(key=d.get('key', None), + permission=_enum(d, 'permission', AppResourceSecretSecretPermission), + scope=d.get('scope', None)) + + +class AppResourceSecretSecretPermission(Enum): + """Permission to grant on the secret scope. 
Supported permissions are: "READ", "WRITE", "MANAGE".""" + + MANAGE = 'MANAGE' + READ = 'READ' + WRITE = 'WRITE' + + +@dataclass +class AppResourceServingEndpoint: + name: str + """Name of the serving endpoint to grant permission on.""" + + permission: AppResourceServingEndpointServingEndpointPermission + """Permission to grant on the serving endpoint. Supported permissions are: "CAN_MANAGE", + "CAN_QUERY", "CAN_VIEW".""" + + def as_dict(self) -> dict: + """Serializes the AppResourceServingEndpoint into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.name is not None: body['name'] = self.name + if self.permission is not None: body['permission'] = self.permission.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AppResourceServingEndpoint: + """Deserializes the AppResourceServingEndpoint from a dictionary.""" + return cls(name=d.get('name', None), + permission=_enum(d, 'permission', AppResourceServingEndpointServingEndpointPermission)) + + +class AppResourceServingEndpointServingEndpointPermission(Enum): + + CAN_MANAGE = 'CAN_MANAGE' + CAN_QUERY = 'CAN_QUERY' + CAN_VIEW = 'CAN_VIEW' + + +@dataclass +class AppResourceSqlWarehouse: + id: str + """Id of the SQL warehouse to grant permission on.""" + + permission: AppResourceSqlWarehouseSqlWarehousePermission + """Permission to grant on the SQL warehouse. 
Supported permissions are: "CAN_MANAGE", "CAN_USE", + "IS_OWNER".""" + + def as_dict(self) -> dict: + """Serializes the AppResourceSqlWarehouse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.id is not None: body['id'] = self.id + if self.permission is not None: body['permission'] = self.permission.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> AppResourceSqlWarehouse: + """Deserializes the AppResourceSqlWarehouse from a dictionary.""" + return cls(id=d.get('id', None), + permission=_enum(d, 'permission', AppResourceSqlWarehouseSqlWarehousePermission)) + + +class AppResourceSqlWarehouseSqlWarehousePermission(Enum): + + CAN_MANAGE = 'CAN_MANAGE' + CAN_USE = 'CAN_USE' + IS_OWNER = 'IS_OWNER' + + class ApplicationState(Enum): CRASHED = 'CRASHED' @@ -478,17 +656,23 @@ class CreateAppRequest: description: Optional[str] = None """The description of the app.""" + resources: Optional[List[AppResource]] = None + """Resources for the app.""" + def as_dict(self) -> dict: """Serializes the CreateAppRequest into a dictionary suitable for use as a JSON request body.""" body = {} if self.description is not None: body['description'] = self.description if self.name is not None: body['name'] = self.name + if self.resources: body['resources'] = [v.as_dict() for v in self.resources] return body @classmethod def from_dict(cls, d: Dict[str, any]) -> CreateAppRequest: """Deserializes the CreateAppRequest from a dictionary.""" - return cls(description=d.get('description', None), name=d.get('name', None)) + return cls(description=d.get('description', None), + name=d.get('name', None), + resources=_repeated_dict(d, 'resources', AppResource)) @dataclass @@ -571,17 +755,23 @@ class UpdateAppRequest: description: Optional[str] = None """The description of the app.""" + resources: Optional[List[AppResource]] = None + """Resources for the app.""" + def as_dict(self) -> dict: """Serializes the UpdateAppRequest into a dictionary 
suitable for use as a JSON request body.""" body = {} if self.description is not None: body['description'] = self.description if self.name is not None: body['name'] = self.name + if self.resources: body['resources'] = [v.as_dict() for v in self.resources] return body @classmethod def from_dict(cls, d: Dict[str, any]) -> UpdateAppRequest: """Deserializes the UpdateAppRequest from a dictionary.""" - return cls(description=d.get('description', None), name=d.get('name', None)) + return cls(description=d.get('description', None), + name=d.get('name', None), + resources=_repeated_dict(d, 'resources', AppResource)) class AppsAPI: @@ -689,7 +879,11 @@ def wait_get_deployment_app_succeeded( attempt += 1 raise TimeoutError(f'timed out after {timeout}: {status_message}') - def create(self, name: str, *, description: Optional[str] = None) -> Wait[App]: + def create(self, + name: str, + *, + description: Optional[str] = None, + resources: Optional[List[AppResource]] = None) -> Wait[App]: """Create an app. Creates a new app. @@ -699,6 +893,8 @@ def create(self, name: str, *, description: Optional[str] = None) -> Wait[App]: must be unique within the workspace. :param description: str (optional) The description of the app. + :param resources: List[:class:`AppResource`] (optional) + Resources for the app. :returns: Long-running operation waiter for :class:`App`. 
@@ -707,6 +903,7 @@ def create(self, name: str, *, description: Optional[str] = None) -> Wait[App]: body = {} if description is not None: body['description'] = description if name is not None: body['name'] = name + if resources is not None: body['resources'] = [v.as_dict() for v in resources] headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } op_response = self._api.do('POST', '/api/2.0/apps', body=body, headers=headers) @@ -716,8 +913,9 @@ def create_and_wait(self, name: str, *, description: Optional[str] = None, + resources: Optional[List[AppResource]] = None, timeout=timedelta(minutes=20)) -> App: - return self.create(description=description, name=name).result(timeout=timeout) + return self.create(description=description, name=name, resources=resources).result(timeout=timeout) def delete(self, name: str) -> App: """Delete an app. @@ -981,7 +1179,11 @@ def stop(self, name: str) -> Wait[App]: def stop_and_wait(self, name: str, timeout=timedelta(minutes=20)) -> App: return self.stop(name=name).result(timeout=timeout) - def update(self, name: str, *, description: Optional[str] = None) -> App: + def update(self, + name: str, + *, + description: Optional[str] = None, + resources: Optional[List[AppResource]] = None) -> App: """Update an app. Updates the app with the supplied name. @@ -991,11 +1193,14 @@ def update(self, name: str, *, description: Optional[str] = None) -> App: must be unique within the workspace. :param description: str (optional) The description of the app. + :param resources: List[:class:`AppResource`] (optional) + Resources for the app. 
:returns: :class:`App` """ body = {} if description is not None: body['description'] = description + if resources is not None: body['resources'] = [v.as_dict() for v in resources] headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do('PATCH', f'/api/2.0/apps/{name}', body=body, headers=headers) diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 9c795dc2a..2ccff4217 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -909,6 +909,7 @@ class ConnectionInfoSecurableKind(Enum): CONNECTION_DATABRICKS = 'CONNECTION_DATABRICKS' CONNECTION_EXTERNAL_HIVE_METASTORE = 'CONNECTION_EXTERNAL_HIVE_METASTORE' CONNECTION_GLUE = 'CONNECTION_GLUE' + CONNECTION_HTTP_BEARER = 'CONNECTION_HTTP_BEARER' CONNECTION_MYSQL = 'CONNECTION_MYSQL' CONNECTION_ONLINE_CATALOG = 'CONNECTION_ONLINE_CATALOG' CONNECTION_POSTGRESQL = 'CONNECTION_POSTGRESQL' @@ -925,6 +926,7 @@ class ConnectionType(Enum): DATABRICKS = 'DATABRICKS' GLUE = 'GLUE' HIVE_METASTORE = 'HIVE_METASTORE' + HTTP = 'HTTP' MYSQL = 'MYSQL' POSTGRESQL = 'POSTGRESQL' REDSHIFT = 'REDSHIFT' @@ -1676,6 +1678,7 @@ def from_dict(cls, d: Dict[str, any]) -> CreateVolumeRequestContent: class CredentialType(Enum): """The type of credential.""" + BEARER_TOKEN = 'BEARER_TOKEN' USERNAME_PASSWORD = 'USERNAME_PASSWORD' @@ -2547,8 +2550,8 @@ class GenerateTemporaryTableCredentialResponse: https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas""" expiration_time: Optional[int] = None - """Server time when the credential will expire, in unix epoch milliseconds since January 1, 1970 at - 00:00:00 UTC. The API client is advised to cache the credential given this expiration time.""" + """Server time when the credential will expire, in epoch milliseconds. 
The API client is advised to + cache the credential given this expiration time.""" gcp_oauth_token: Optional[GcpOauthToken] = None """GCP temporary credentials for API authentication. Read more at diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 6e85cf45c..27117d43a 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -169,8 +169,8 @@ class Dashboard: trailing slash. This field is excluded in List Dashboards responses.""" path: Optional[str] = None - """The workspace path of the dashboard asset, including the file name. This field is excluded in - List Dashboards responses.""" + """The workspace path of the dashboard asset, including the file name. Exported dashboards always + have the file extension `.lvdash.json`. This field is excluded in List Dashboards responses.""" serialized_dashboard: Optional[str] = None """The contents of the dashboard in serialized string form. This field is excluded in List diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index e7fbddb48..b3c723f37 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -2478,6 +2478,7 @@ class RepairRun: [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" pipeline_params: Optional[PipelineParams] = None + """Controls whether the pipeline should perform a full refresh""" python_named_params: Optional[Dict[str, str]] = None @@ -3181,6 +3182,7 @@ class RunJobTask: [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" pipeline_params: Optional[PipelineParams] = None + """Controls whether the pipeline should perform a full refresh""" python_named_params: Optional[Dict[str, str]] = None @@ -3340,6 +3342,7 @@ class RunNow: [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" pipeline_params: Optional[PipelineParams] = None + """Controls whether the pipeline should 
perform a full refresh""" python_named_params: Optional[Dict[str, str]] = None @@ -3549,6 +3552,7 @@ class RunParameters: [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html""" pipeline_params: Optional[PipelineParams] = None + """Controls whether the pipeline should perform a full refresh""" python_named_params: Optional[Dict[str, str]] = None @@ -6087,6 +6091,7 @@ def repair_run(self, [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) + Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. @@ -6276,6 +6281,7 @@ def run_now(self, [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) + Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index fd7ed5dd8..a6a235158 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -720,6 +720,30 @@ def from_dict(cls, d: Dict[str, any]) -> DeleteDisableLegacyAccessResponse: return cls(etag=d.get('etag', None)) +@dataclass +class DeleteDisableLegacyDbfsResponse: + """The etag is returned.""" + + etag: str + """etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" + + def as_dict(self) -> dict: + """Serializes the DeleteDisableLegacyDbfsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.etag is not None: body['etag'] = self.etag + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> DeleteDisableLegacyDbfsResponse: + """Deserializes the DeleteDisableLegacyDbfsResponse from a dictionary.""" + return cls(etag=d.get('etag', None)) + + @dataclass class DeleteDisableLegacyFeaturesResponse: """The etag is returned.""" @@ -863,6 +887,40 @@ def from_dict(cls, d: Dict[str, any]) -> DisableLegacyAccess: setting_name=d.get('setting_name', None)) +@dataclass +class DisableLegacyDbfs: + disable_legacy_dbfs: BooleanMessage + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. 
Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the DisableLegacyDbfs into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.disable_legacy_dbfs: body['disable_legacy_dbfs'] = self.disable_legacy_dbfs.as_dict() + if self.etag is not None: body['etag'] = self.etag + if self.setting_name is not None: body['setting_name'] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> DisableLegacyDbfs: + """Deserializes the DisableLegacyDbfs from a dictionary.""" + return cls(disable_legacy_dbfs=_from_dict(d, 'disable_legacy_dbfs', BooleanMessage), + etag=d.get('etag', None), + setting_name=d.get('setting_name', None)) + + @dataclass class DisableLegacyFeatures: disable_legacy_features: BooleanMessage @@ -2534,6 +2592,36 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateDisableLegacyAccessRequest: setting=_from_dict(d, 'setting', DisableLegacyAccess)) +@dataclass +class UpdateDisableLegacyDbfsRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: DisableLegacyDbfs + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. 
To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdateDisableLegacyDbfsRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateDisableLegacyDbfsRequest: + """Deserializes the UpdateDisableLegacyDbfsRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', DisableLegacyDbfs)) + + @dataclass class UpdateDisableLegacyFeaturesRequest: """Details required to update a setting.""" @@ -3447,6 +3535,91 @@ def update(self, allow_missing: bool, setting: DisableLegacyAccess, return DisableLegacyAccess.from_dict(res) +class DisableLegacyDbfsAPI: + """When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new + mounts). When the setting is off, all DBFS functionality is enabled""" + + def __init__(self, api_client): + self._api = api_client + + def delete(self, *, etag: Optional[str] = None) -> DeleteDisableLegacyDbfsResponse: + """Delete the disable legacy DBFS setting. + + Deletes the disable legacy DBFS setting for a workspace, reverting back to the default. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. 
That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDisableLegacyDbfsResponse` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + + res = self._api.do('DELETE', + '/api/2.0/settings/types/disable_legacy_dbfs/names/default', + query=query, + headers=headers) + return DeleteDisableLegacyDbfsResponse.from_dict(res) + + def get(self, *, etag: Optional[str] = None) -> DisableLegacyDbfs: + """Get the disable legacy DBFS setting. + + Gets the disable legacy DBFS setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DisableLegacyDbfs` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + + res = self._api.do('GET', + '/api/2.0/settings/types/disable_legacy_dbfs/names/default', + query=query, + headers=headers) + return DisableLegacyDbfs.from_dict(res) + + def update(self, allow_missing: bool, setting: DisableLegacyDbfs, field_mask: str) -> DisableLegacyDbfs: + """Update the disable legacy DBFS setting. + + Updates the disable legacy DBFS setting for the workspace. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DisableLegacyDbfs` + :param field_mask: str + Field mask is required to be passed into the PATCH request. 
Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`DisableLegacyDbfs` + """ + body = {} + if allow_missing is not None: body['allow_missing'] = allow_missing + if field_mask is not None: body['field_mask'] = field_mask + if setting is not None: body['setting'] = setting.as_dict() + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + + res = self._api.do('PATCH', + '/api/2.0/settings/types/disable_legacy_dbfs/names/default', + body=body, + headers=headers) + return DisableLegacyDbfs.from_dict(res) + + class DisableLegacyFeaturesAPI: """Disable legacy features for new Databricks workspaces. @@ -4411,6 +4584,7 @@ def __init__(self, api_client): self._compliance_security_profile = ComplianceSecurityProfileAPI(self._api) self._default_namespace = DefaultNamespaceAPI(self._api) self._disable_legacy_access = DisableLegacyAccessAPI(self._api) + self._disable_legacy_dbfs = DisableLegacyDbfsAPI(self._api) self._enhanced_security_monitoring = EnhancedSecurityMonitoringAPI(self._api) self._restrict_workspace_admins = RestrictWorkspaceAdminsAPI(self._api) @@ -4434,6 +4608,11 @@ def disable_legacy_access(self) -> DisableLegacyAccessAPI: """'Disabling legacy access' has the following impacts: 1.""" return self._disable_legacy_access + @property + def disable_legacy_dbfs(self) -> DisableLegacyDbfsAPI: + """When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new mounts).""" + return self._disable_legacy_dbfs + @property def enhanced_security_monitoring(self) -> EnhancedSecurityMonitoringAPI: """Controls whether enhanced security monitoring is enabled for the current workspace.""" diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 348a27123..4f0e49c77 100755 --- a/databricks/sdk/service/sql.py 
+++ b/databricks/sdk/service/sql.py @@ -454,6 +454,9 @@ def from_dict(cls, d: Dict[str, any]) -> CancelExecutionResponse: @dataclass class Channel: + """Configures the channel name and DBSQL version of the warehouse. CHANNEL_NAME_CUSTOM should be + chosen only when `dbsql_version` is specified.""" + dbsql_version: Optional[str] = None name: Optional[ChannelName] = None @@ -499,7 +502,6 @@ class ChannelName(Enum): CHANNEL_NAME_CURRENT = 'CHANNEL_NAME_CURRENT' CHANNEL_NAME_CUSTOM = 'CHANNEL_NAME_CUSTOM' CHANNEL_NAME_PREVIEW = 'CHANNEL_NAME_PREVIEW' - CHANNEL_NAME_PREVIOUS = 'CHANNEL_NAME_PREVIOUS' CHANNEL_NAME_UNSPECIFIED = 'CHANNEL_NAME_UNSPECIFIED' @@ -827,7 +829,8 @@ class CreateWarehouseRequest: """The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for + non-serverless warehouses - 0 indicates no autostop. Defaults to 120 mins""" @@ -6866,7 +6869,8 @@ def create( The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for + non-serverless warehouses - 0 indicates no autostop. Defaults to 120 mins :param channel: :class:`Channel` (optional) diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index 6c12c6039..7c8bfbd5e 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -1862,8 +1862,8 @@ def list(self, path_prefix: Optional[str] = None) -> Iterator[RepoInfo]: """Get repos. - Returns repos that the calling user has Manage permissions on. Results are paginated with each page - containing twenty repos. 
+ Returns repos that the calling user has Manage permissions on. Use `next_page_token` to iterate + through additional pages. :param next_page_token: str (optional) Token used to get the next page of results. If not specified, returns the first page of results as diff --git a/docs/account/settings/disable_legacy_features.rst b/docs/account/settings/disable_legacy_features.rst new file mode 100644 index 000000000..d7f1db9d3 --- /dev/null +++ b/docs/account/settings/disable_legacy_features.rst @@ -0,0 +1,60 @@ +``a.settings.disable_legacy_features``: Disable Legacy Features +=============================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: DisableLegacyFeaturesAPI + + Disable legacy features for new Databricks workspaces. + + For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be + provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions + prior to 13.3LTS. + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyFeaturesResponse + + Delete the disable legacy features setting. + + Deletes the disable legacy features setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDisableLegacyFeaturesResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyFeatures + + Get the disable legacy features setting. + + Gets the value of the disable legacy features setting. 
+ + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DisableLegacyFeatures` + + + .. py:method:: update(allow_missing: bool, setting: DisableLegacyFeatures, field_mask: str) -> DisableLegacyFeatures + + Update the disable legacy features setting. + + Updates the value of the disable legacy features setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DisableLegacyFeatures` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). 
+ + :returns: :class:`DisableLegacyFeatures` + \ No newline at end of file diff --git a/docs/account/settings/index.rst b/docs/account/settings/index.rst index 2c53b1afa..abf97c6a0 100644 --- a/docs/account/settings/index.rst +++ b/docs/account/settings/index.rst @@ -11,5 +11,6 @@ Manage security settings for Accounts and Workspaces network_connectivity settings csp_enablement_account + disable_legacy_features esm_enablement_account personal_compute \ No newline at end of file diff --git a/docs/account/settings/settings.rst b/docs/account/settings/settings.rst index 9ef26a1ee..3df647279 100644 --- a/docs/account/settings/settings.rst +++ b/docs/account/settings/settings.rst @@ -16,6 +16,15 @@ This settings can be disabled so that new workspaces do not have compliance security profile enabled by default. + .. py:property:: disable_legacy_features + :type: DisableLegacyFeaturesAPI + + Disable legacy features for new Databricks workspaces. + + For newly created workspaces: 1. Disables the use of DBFS root and mounts. 2. Hive Metastore will not be + provisioned. 3. Disables the use of ‘No-isolation clusters’. 4. Disables Databricks Runtime versions + prior to 13.3LTS. + .. py:property:: esm_enablement_account :type: EsmEnablementAccountAPI diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 827a563b8..2d522c625 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -34,15 +34,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: AppDeploymentState + .. py:attribute:: CANCELLED + :value: "CANCELLED" + .. py:attribute:: FAILED :value: "FAILED" .. py:attribute:: IN_PROGRESS :value: "IN_PROGRESS" - .. py:attribute:: STOPPED - :value: "STOPPED" - .. py:attribute:: SUCCEEDED :value: "SUCCEEDED" @@ -76,30 +76,117 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: AppState +.. 
autoclass:: AppResource + :members: + :undoc-members: - .. py:attribute:: CREATING - :value: "CREATING" +.. autoclass:: AppResourceJob + :members: + :undoc-members: - .. py:attribute:: DELETED - :value: "DELETED" +.. py:class:: AppResourceJobJobPermission - .. py:attribute:: DELETING - :value: "DELETING" + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" - .. py:attribute:: ERROR - :value: "ERROR" + .. py:attribute:: CAN_MANAGE_RUN + :value: "CAN_MANAGE_RUN" - .. py:attribute:: IDLE - :value: "IDLE" + .. py:attribute:: CAN_VIEW + :value: "CAN_VIEW" + + .. py:attribute:: IS_OWNER + :value: "IS_OWNER" + +.. autoclass:: AppResourceSecret + :members: + :undoc-members: + +.. py:class:: AppResourceSecretSecretPermission + + Permission to grant on the secret scope. Supported permissions are: "READ", "WRITE", "MANAGE". + + .. py:attribute:: MANAGE + :value: "MANAGE" + + .. py:attribute:: READ + :value: "READ" + + .. py:attribute:: WRITE + :value: "WRITE" + +.. autoclass:: AppResourceServingEndpoint + :members: + :undoc-members: + +.. py:class:: AppResourceServingEndpointServingEndpointPermission + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_QUERY + :value: "CAN_QUERY" + + .. py:attribute:: CAN_VIEW + :value: "CAN_VIEW" + +.. autoclass:: AppResourceSqlWarehouse + :members: + :undoc-members: + +.. py:class:: AppResourceSqlWarehouseSqlWarehousePermission + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_USE + :value: "CAN_USE" + + .. py:attribute:: IS_OWNER + :value: "IS_OWNER" + +.. py:class:: ApplicationState + + .. py:attribute:: CRASHED + :value: "CRASHED" + + .. py:attribute:: DEPLOYING + :value: "DEPLOYING" .. py:attribute:: RUNNING :value: "RUNNING" + .. py:attribute:: UNAVAILABLE + :value: "UNAVAILABLE" + +.. autoclass:: ApplicationStatus + :members: + :undoc-members: + +.. py:class:: ComputeState + + .. py:attribute:: ACTIVE + :value: "ACTIVE" + + .. 
py:attribute:: DELETING + :value: "DELETING" + + .. py:attribute:: ERROR + :value: "ERROR" + .. py:attribute:: STARTING :value: "STARTING" -.. autoclass:: AppStatus + .. py:attribute:: STOPPED + :value: "STOPPED" + + .. py:attribute:: STOPPING + :value: "STOPPING" + + .. py:attribute:: UPDATING + :value: "UPDATING" + +.. autoclass:: ComputeStatus :members: :undoc-members: @@ -111,10 +198,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteResponse - :members: - :undoc-members: - .. autoclass:: GetAppPermissionLevelsResponse :members: :undoc-members: @@ -135,10 +218,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: StopAppResponse - :members: - :undoc-members: - .. autoclass:: UpdateAppRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 4f9c651d2..b0f4f838e 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -65,6 +65,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AwsCredentials + :members: + :undoc-members: + .. autoclass:: AwsIamRoleRequest :members: :undoc-members: @@ -85,6 +89,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AzureUserDelegationSas + :members: + :undoc-members: + .. autoclass:: CancelRefreshResponse :members: :undoc-members: @@ -261,6 +269,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CONNECTION_GLUE :value: "CONNECTION_GLUE" + .. py:attribute:: CONNECTION_HTTP_BEARER + :value: "CONNECTION_HTTP_BEARER" + .. py:attribute:: CONNECTION_MYSQL :value: "CONNECTION_MYSQL" @@ -298,6 +309,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. 
py:attribute:: HIVE_METASTORE :value: "HIVE_METASTORE" + .. py:attribute:: HTTP + :value: "HTTP" + .. py:attribute:: MYSQL :value: "MYSQL" @@ -421,6 +435,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo The type of credential. + .. py:attribute:: BEARER_TOKEN + :value: "BEARER_TOKEN" + .. py:attribute:: USERNAME_PASSWORD :value: "USERNAME_PASSWORD" @@ -662,6 +679,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: PARAM :value: "PARAM" +.. autoclass:: GcpOauthToken + :members: + :undoc-members: + +.. autoclass:: GenerateTemporaryTableCredentialRequest + :members: + :undoc-members: + +.. autoclass:: GenerateTemporaryTableCredentialResponse + :members: + :undoc-members: + .. py:class:: GetBindingsSecurableType .. py:attribute:: CATALOG @@ -1176,6 +1205,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: R2Credentials + :members: + :undoc-members: + .. autoclass:: RegenerateDashboardRequest :members: :undoc-members: @@ -1304,6 +1337,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: TableOperation + + .. py:attribute:: READ + :value: "READ" + + .. py:attribute:: READ_WRITE + :value: "READ_WRITE" + .. autoclass:: TableRowFilter :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index f4e175920..0066f0374 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -1022,7 +1022,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: RuntimeEngine - Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime engine is inferred from spark_version. + Determines the cluster's runtime engine, either standard or Photon. 
+ This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + If left unspecified, the runtime engine defaults to standard unless the spark_version contains -photon-, in which case Photon will be used. .. py:attribute:: NULL :value: "NULL" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 8765ee695..192095548 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -207,7 +207,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: MessageStatus - MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching metadata from the data sources. * `ASKING_AI`: Waiting for the LLM to respond to the users question. * `EXECUTING_QUERY`: Executing AI provided SQL query. Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. **Important: The message status will stay in the `EXECUTING_QUERY` until a client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. * `FAILED`: Generating a response or the executing the query failed. Please see `error` field. * `COMPLETED`: Message processing is completed. Results are in the `attachments` field. Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result is not available anymore. The user needs to execute the query again. * `CANCELLED`: Message has been cancelled. + MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching metadata from the data sources. * `FILTERING_CONTEXT`: Running smart context step to determine relevant context. * `ASKING_AI`: Waiting for the LLM to respond to the users question. * `EXECUTING_QUERY`: Executing AI provided SQL query. 
Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. **Important: The message status will stay in the `EXECUTING_QUERY` until a client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. * `FAILED`: Generating a response or the executing the query failed. Please see `error` field. * `COMPLETED`: Message processing is completed. Results are in the `attachments` field. Get the SQL query result by calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result is not available anymore. The user needs to execute the query again. * `CANCELLED`: Message has been cancelled. .. py:attribute:: ASKING_AI :value: "ASKING_AI" @@ -227,6 +227,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FETCHING_METADATA :value: "FETCHING_METADATA" + .. py:attribute:: FILTERING_CONTEXT + :value: "FILTERING_CONTEXT" + .. py:attribute:: QUERY_RESULT_EXPIRED :value: "QUERY_RESULT_EXPIRED" diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index b1b05ec18..3aa0db043 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -601,11 +601,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: RunResultState - A value indicating the run's result. The possible values are: * `SUCCESS`: The task completed successfully. * `FAILED`: The task completed with an error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * `CANCELED`: The run was canceled at user request. * `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum concurrent runs were reached. * `EXCLUDED`: The run was skipped because the necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run completed successfully with some failures; leaf tasks were successful. 
* `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * `UPSTREAM_CANCELED`: The run was skipped because an upstream task was canceled. + A value indicating the run's result. The possible values are: * `SUCCESS`: The task completed successfully. * `FAILED`: The task completed with an error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * `CANCELED`: The run was canceled at user request. * `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum concurrent runs were reached. * `EXCLUDED`: The run was skipped because the necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run completed successfully with some failures; leaf tasks were successful. * `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * `UPSTREAM_CANCELED`: The run was skipped because an upstream task was canceled. * `DISABLED`: The run was skipped because it was disabled explicitly by the user. .. py:attribute:: CANCELED :value: "CANCELED" + .. py:attribute:: DISABLED + :value: "DISABLED" + .. py:attribute:: EXCLUDED :value: "EXCLUDED" @@ -796,7 +799,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: TerminationCodeCode - The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. 
Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. 
* `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. + The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. 
For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now .. py:attribute:: CANCELED @@ -859,6 +862,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: UNAUTHORIZED_ERROR :value: "UNAUTHORIZED_ERROR" + .. py:attribute:: USER_CANCELED + :value: "USER_CANCELED" + .. py:attribute:: WORKSPACE_RUN_LIMIT_EXCEEDED :value: "WORKSPACE_RUN_LIMIT_EXCEEDED" diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 23ef3c257..3deefc873 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -8,6 +8,61 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. 
autoclass:: AiGatewayConfig + :members: + :undoc-members: + +.. autoclass:: AiGatewayGuardrailParameters + :members: + :undoc-members: + +.. autoclass:: AiGatewayGuardrailPiiBehavior + :members: + :undoc-members: + +.. py:class:: AiGatewayGuardrailPiiBehaviorBehavior + + Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned. + + .. py:attribute:: BLOCK + :value: "BLOCK" + + .. py:attribute:: NONE + :value: "NONE" + +.. autoclass:: AiGatewayGuardrails + :members: + :undoc-members: + +.. autoclass:: AiGatewayInferenceTableConfig + :members: + :undoc-members: + +.. autoclass:: AiGatewayRateLimit + :members: + :undoc-members: + +.. py:class:: AiGatewayRateLimitKey + + Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified. + + .. py:attribute:: ENDPOINT + :value: "ENDPOINT" + + .. py:attribute:: USER + :value: "USER" + +.. py:class:: AiGatewayRateLimitRenewalPeriod + + Renewal period field for a rate limit. Currently, only 'minute' is supported. + + .. py:attribute:: MINUTE + :value: "MINUTE" + +.. autoclass:: AiGatewayUsageTrackingConfig + :members: + :undoc-members: + .. autoclass:: AmazonBedrockConfig :members: :undoc-members: @@ -226,6 +281,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PutAiGatewayResponse + :members: + :undoc-members: + .. 
autoclass:: PutResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 0031512e7..12043e3c5 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -8,6 +8,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: BooleanMessage + :members: + :undoc-members: + .. autoclass:: ClusterAutoRestartMessage :members: :undoc-members: @@ -188,6 +192,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteDisableLegacyAccessResponse + :members: + :undoc-members: + +.. autoclass:: DeleteDisableLegacyDbfsResponse + :members: + :undoc-members: + +.. autoclass:: DeleteDisableLegacyFeaturesResponse + :members: + :undoc-members: + .. autoclass:: DeleteNetworkConnectivityConfigurationResponse :members: :undoc-members: @@ -221,6 +237,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WEBHOOK :value: "WEBHOOK" +.. autoclass:: DisableLegacyAccess + :members: + :undoc-members: + +.. autoclass:: DisableLegacyDbfs + :members: + :undoc-members: + +.. autoclass:: DisableLegacyFeatures + :members: + :undoc-members: + .. autoclass:: EmailConfig :members: :undoc-members: @@ -509,6 +537,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo The type of token request. As of now, only `AZURE_ACTIVE_DIRECTORY_TOKEN` is supported. + .. py:attribute:: ARCLIGHT_AZURE_EXCHANGE_TOKEN + :value: "ARCLIGHT_AZURE_EXCHANGE_TOKEN" + .. py:attribute:: AZURE_ACTIVE_DIRECTORY_TOKEN :value: "AZURE_ACTIVE_DIRECTORY_TOKEN" @@ -528,6 +559,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateDisableLegacyAccessRequest + :members: + :undoc-members: + +.. 
autoclass:: UpdateDisableLegacyDbfsRequest + :members: + :undoc-members: + +.. autoclass:: UpdateDisableLegacyFeaturesRequest + :members: + :undoc-members: + .. autoclass:: UpdateEnhancedSecurityMonitoringSettingRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 255123067..1657146c3 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -114,16 +114,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CHANNEL_NAME_PREVIEW :value: "CHANNEL_NAME_PREVIEW" - .. py:attribute:: CHANNEL_NAME_PREVIOUS - :value: "CHANNEL_NAME_PREVIOUS" - .. py:attribute:: CHANNEL_NAME_UNSPECIFIED :value: "CHANNEL_NAME_UNSPECIFIED" -.. autoclass:: ClientCallContext - :members: - :undoc-members: - .. autoclass:: ColumnInfo :members: :undoc-members: @@ -391,20 +384,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: EncodedText - :members: - :undoc-members: - -.. py:class:: EncodedTextEncoding - - Carry text data in different form. - - .. py:attribute:: BASE64 - :value: "BASE64" - - .. py:attribute:: PLAIN - :value: "PLAIN" - .. autoclass:: EndpointConfPair :members: :undoc-members: @@ -744,78 +723,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QuerySource - :members: - :undoc-members: - -.. autoclass:: QuerySourceDriverInfo - :members: - :undoc-members: - -.. py:class:: QuerySourceEntryPoint - - Spark service that received and processed the query - - .. py:attribute:: DLT - :value: "DLT" - - .. py:attribute:: SPARK_CONNECT - :value: "SPARK_CONNECT" - - .. py:attribute:: THRIFT_SERVER - :value: "THRIFT_SERVER" - -.. py:class:: QuerySourceJobManager - - Copied from elastic-spark-common/api/messages/manager.proto with enum values changed by 1 to accommodate JOB_MANAGER_UNSPECIFIED - - .. 
py:attribute:: APP_SYSTEM_TABLE - :value: "APP_SYSTEM_TABLE" - - .. py:attribute:: AUTOML - :value: "AUTOML" - - .. py:attribute:: AUTO_MAINTENANCE - :value: "AUTO_MAINTENANCE" - - .. py:attribute:: CLEAN_ROOMS - :value: "CLEAN_ROOMS" - - .. py:attribute:: DATA_MONITORING - :value: "DATA_MONITORING" - - .. py:attribute:: DATA_SHARING - :value: "DATA_SHARING" - - .. py:attribute:: ENCRYPTION - :value: "ENCRYPTION" - - .. py:attribute:: FABRIC_CRAWLER - :value: "FABRIC_CRAWLER" - - .. py:attribute:: JOBS - :value: "JOBS" - - .. py:attribute:: LAKEVIEW - :value: "LAKEVIEW" - - .. py:attribute:: MANAGED_RAG - :value: "MANAGED_RAG" - - .. py:attribute:: SCHEDULED_MV_REFRESH - :value: "SCHEDULED_MV_REFRESH" - - .. py:attribute:: TESTING - :value: "TESTING" - -.. py:class:: QuerySourceTrigger - - .. py:attribute:: MANUAL - :value: "MANUAL" - - .. py:attribute:: SCHEDULED - :value: "SCHEDULED" - .. py:class:: QueryStatementType .. py:attribute:: ALTER @@ -950,10 +857,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VIEWER :value: "VIEWER" -.. autoclass:: ServerlessChannelInfo - :members: - :undoc-members: - .. autoclass:: ServiceError :members: :undoc-members: diff --git a/docs/dbdataclasses/workspace.rst b/docs/dbdataclasses/workspace.rst index eaf70f9e0..9ff3eb66b 100644 --- a/docs/dbdataclasses/workspace.rst +++ b/docs/dbdataclasses/workspace.rst @@ -23,7 +23,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateCredentials +.. autoclass:: CreateCredentialsRequest :members: :undoc-members: @@ -31,7 +31,11 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateRepo +.. autoclass:: CreateRepoRequest + :members: + :undoc-members: + +.. 
autoclass:: CreateRepoResponse :members: :undoc-members: @@ -59,6 +63,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteCredentialsResponse + :members: + :undoc-members: + +.. autoclass:: DeleteRepoResponse + :members: + :undoc-members: + .. autoclass:: DeleteResponse :members: :undoc-members: @@ -111,6 +123,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetRepoResponse + :members: + :undoc-members: + .. autoclass:: GetSecretResponse :members: :undoc-members: @@ -171,6 +187,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListCredentialsResponse + :members: + :undoc-members: + .. autoclass:: ListReposResponse :members: :undoc-members: @@ -306,15 +326,19 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateCredentials +.. autoclass:: UpdateCredentialsRequest + :members: + :undoc-members: + +.. autoclass:: UpdateCredentialsResponse :members: :undoc-members: -.. autoclass:: UpdateRepo +.. autoclass:: UpdateRepoRequest :members: :undoc-members: -.. autoclass:: UpdateResponse +.. autoclass:: UpdateRepoResponse :members: :undoc-members: diff --git a/docs/workspace/apps/apps.rst b/docs/workspace/apps/apps.rst index 455bb81cc..774e75b8b 100644 --- a/docs/workspace/apps/apps.rst +++ b/docs/workspace/apps/apps.rst @@ -7,7 +7,7 @@ Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. - .. py:method:: create(name: str [, description: Optional[str]]) -> Wait[App] + .. py:method:: create(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> Wait[App] Create an app. 
@@ -18,16 +18,18 @@ must be unique within the workspace. :param description: str (optional) The description of the app. + :param resources: List[:class:`AppResource`] (optional) + Resources for the app. :returns: Long-running operation waiter for :class:`App`. - See :method:wait_get_app_idle for more details. + See :method:wait_get_app_active for more details. - .. py:method:: create_and_wait(name: str [, description: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> App + .. py:method:: create_and_wait(name: str [, description: Optional[str], resources: Optional[List[AppResource]], timeout: datetime.timedelta = 0:20:00]) -> App - .. py:method:: delete(name: str) + .. py:method:: delete(name: str) -> App Delete an app. @@ -36,10 +38,10 @@ :param name: str The name of the app. - + :returns: :class:`App` - .. py:method:: deploy(app_name: str, source_code_path: str [, mode: Optional[AppDeploymentMode]]) -> Wait[AppDeployment] + .. py:method:: deploy(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str]]) -> Wait[AppDeployment] Create an app deployment. @@ -47,21 +49,23 @@ :param app_name: str The name of the app. - :param source_code_path: str + :param deployment_id: str (optional) + The unique id of the deployment. + :param mode: :class:`AppDeploymentMode` (optional) + The mode of which the deployment will manage the source code. + :param source_code_path: str (optional) The workspace file system path of the source code used to create the app deployment. This is different from `deployment_artifacts.source_code_path`, which is the path used by the deployed app. The former refers to the original source code location of the app in the workspace during deployment creation, whereas the latter provides a system generated stable snapshotted source code path used by the deployment. - :param mode: :class:`AppDeploymentMode` (optional) - The mode of which the deployment will manage the source code. 
:returns: Long-running operation waiter for :class:`AppDeployment`. See :method:wait_get_deployment_app_succeeded for more details. - .. py:method:: deploy_and_wait(app_name: str, source_code_path: str [, mode: Optional[AppDeploymentMode], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment + .. py:method:: deploy_and_wait(app_name: str [, deployment_id: Optional[str], mode: Optional[AppDeploymentMode], source_code_path: Optional[str], timeout: datetime.timedelta = 0:20:00]) -> AppDeployment .. py:method:: get(name: str) -> App @@ -157,7 +161,7 @@ :returns: :class:`AppPermissions` - .. py:method:: start(name: str) -> Wait[AppDeployment] + .. py:method:: start(name: str) -> Wait[App] Start an app. @@ -167,14 +171,14 @@ The name of the app. :returns: - Long-running operation waiter for :class:`AppDeployment`. - See :method:wait_get_deployment_app_succeeded for more details. + Long-running operation waiter for :class:`App`. + See :method:wait_get_app_active for more details. - .. py:method:: start_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> AppDeployment + .. py:method:: start_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App - .. py:method:: stop(name: str) + .. py:method:: stop(name: str) -> Wait[App] Stop an app. @@ -183,10 +187,15 @@ :param name: str The name of the app. - + :returns: + Long-running operation waiter for :class:`App`. + See :method:wait_get_app_stopped for more details. - .. py:method:: update(name: str [, description: Optional[str]]) -> App + .. py:method:: stop_and_wait(name: str, timeout: datetime.timedelta = 0:20:00) -> App + + + .. py:method:: update(name: str [, description: Optional[str], resources: Optional[List[AppResource]]]) -> App Update an app. @@ -197,6 +206,8 @@ must be unique within the workspace. :param description: str (optional) The description of the app. + :param resources: List[:class:`AppResource`] (optional) + Resources for the app. 
:returns: :class:`App` @@ -214,7 +225,10 @@ :returns: :class:`AppPermissions` - .. py:method:: wait_get_app_idle(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App + .. py:method:: wait_get_app_active(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App + + + .. py:method:: wait_get_app_stopped(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[App], None]]) -> App .. py:method:: wait_get_deployment_app_succeeded(app_name: str, deployment_id: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[AppDeployment], None]]) -> AppDeployment diff --git a/docs/workspace/catalog/index.rst b/docs/workspace/catalog/index.rst index 3bf2522d8..1372ca5a1 100644 --- a/docs/workspace/catalog/index.rst +++ b/docs/workspace/catalog/index.rst @@ -24,5 +24,6 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, system_schemas table_constraints tables + temporary_table_credentials volumes workspace_bindings \ No newline at end of file diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index 6249f0da1..4cb458b46 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -45,7 +45,7 @@ :returns: :class:`TableExistsResponse` - .. py:method:: get(full_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool]]) -> TableInfo + .. py:method:: get(full_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], include_manifest_capabilities: Optional[bool]]) -> TableInfo Usage: @@ -94,11 +94,13 @@ for :param include_delta_metadata: bool (optional) Whether delta metadata should be included in the response. + :param include_manifest_capabilities: bool (optional) + Whether to include a manifest containing capabilities the table has. :returns: :class:`TableInfo` - .. 
py:method:: list(catalog_name: str, schema_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], max_results: Optional[int], omit_columns: Optional[bool], omit_properties: Optional[bool], page_token: Optional[str]]) -> Iterator[TableInfo] + .. py:method:: list(catalog_name: str, schema_name: str [, include_browse: Optional[bool], include_delta_metadata: Optional[bool], include_manifest_capabilities: Optional[bool], max_results: Optional[int], omit_columns: Optional[bool], omit_properties: Optional[bool], page_token: Optional[str]]) -> Iterator[TableInfo] Usage: @@ -138,6 +140,8 @@ for :param include_delta_metadata: bool (optional) Whether delta metadata should be included in the response. + :param include_manifest_capabilities: bool (optional) + Whether to include a manifest containing capabilities the table has. :param max_results: int (optional) Maximum number of tables to return. If not set, all the tables are returned (not recommended). - when set to a value greater than 0, the page length is the minimum of this value and a server @@ -153,7 +157,7 @@ :returns: Iterator over :class:`TableInfo` - .. py:method:: list_summaries(catalog_name: str [, max_results: Optional[int], page_token: Optional[str], schema_name_pattern: Optional[str], table_name_pattern: Optional[str]]) -> Iterator[TableSummary] + .. py:method:: list_summaries(catalog_name: str [, include_manifest_capabilities: Optional[bool], max_results: Optional[int], page_token: Optional[str], schema_name_pattern: Optional[str], table_name_pattern: Optional[str]]) -> Iterator[TableSummary] Usage: @@ -192,6 +196,8 @@ :param catalog_name: str Name of parent catalog for tables of interest. + :param include_manifest_capabilities: bool (optional) + Whether to include a manifest containing capabilities the table has. :param max_results: int (optional) Maximum number of summaries for tables to return. 
If not set, the page length is set to a server configured value (10000, as of 1/5/2024). - when set to a value greater than 0, the page length is diff --git a/docs/workspace/catalog/temporary_table_credentials.rst b/docs/workspace/catalog/temporary_table_credentials.rst new file mode 100644 index 000000000..1acd462b7 --- /dev/null +++ b/docs/workspace/catalog/temporary_table_credentials.rst @@ -0,0 +1,36 @@ +``w.temporary_table_credentials``: Temporary Table Credentials +============================================================== +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: TemporaryTableCredentialsAPI + + Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage + locations where table data is stored in Databricks. These credentials are employed to provide secure and + time-limited access to data in cloud environments such as AWS, Azure, and Google Cloud. Each cloud provider + has its own type of credentials: AWS uses temporary session tokens via AWS Security Token Service (STS), + Azure utilizes Shared Access Signatures (SAS) for its data storage services, and Google Cloud supports + temporary credentials through OAuth 2.0. Temporary table credentials ensure that data access is limited in + scope and duration, reducing the risk of unauthorized access or misuse. To use the temporary table + credentials API, a metastore admin needs to enable the external_access_enabled flag (off by default) at + the metastore level, and user needs to be granted the EXTERNAL USE SCHEMA permission at the schema level + by catalog admin. Note that EXTERNAL USE SCHEMA is a schema level permission that can only be granted by + catalog admin explicitly and is not included in schema ownership or ALL PRIVILEGES on the schema for + security reasons. + + .. 
py:method:: generate_temporary_table_credentials( [, operation: Optional[TableOperation], table_id: Optional[str]]) -> GenerateTemporaryTableCredentialResponse + + Generate a temporary table credential. + + Get a short-lived credential for directly accessing the table data on cloud storage. The metastore + must have external_access_enabled flag set to true (default false). The caller must have + EXTERNAL_USE_SCHEMA privilege on the parent schema and this privilege can only be granted by catalog + owners. + + :param operation: :class:`TableOperation` (optional) + The operation performed against the table data, either READ or READ_WRITE. If READ_WRITE is + specified, the credentials returned will have write permissions, otherwise, it will be read only. + :param table_id: str (optional) + UUID of the table to read or write. + + :returns: :class:`GenerateTemporaryTableCredentialResponse` + \ No newline at end of file diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index 601b55812..ac52edecb 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -107,6 +107,11 @@ If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed. Otherwise the cluster will terminate with an informative error message. + Rather than authoring the cluster's JSON definition from scratch, Databricks recommends filling out + the [create compute UI] and then copying the generated JSON definition from the UI. + + [create compute UI]: https://docs.databricks.com/compute/configure.html + :param spark_version: str The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call. @@ -202,8 +207,13 @@ :param policy_id: str (optional) The ID of the cluster policy used to create the cluster if applicable. 
:param runtime_engine: :class:`RuntimeEngine` (optional) - Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime engine - is inferred from spark_version. + Determines the cluster's runtime engine, either standard or Photon. + + This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove + `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + + If left unspecified, the runtime engine defaults to standard unless the spark_version contains + -photon-, in which case Photon will be used. :param single_user_name: str (optional) Single user name if data_security_mode is `SINGLE_USER` :param spark_conf: Dict[str,str] (optional) @@ -425,8 +435,13 @@ :param policy_id: str (optional) The ID of the cluster policy used to create the cluster if applicable. :param runtime_engine: :class:`RuntimeEngine` (optional) - Decides which runtime engine to be use, e.g. Standard vs. Photon. If unspecified, the runtime engine - is inferred from spark_version. + Determines the cluster's runtime engine, either standard or Photon. + + This field is not compatible with legacy `spark_version` values that contain `-photon-`. Remove + `-photon-` from the `spark_version` and set `runtime_engine` to `PHOTON`. + + If left unspecified, the runtime engine defaults to standard unless the spark_version contains + -photon-, in which case Photon will be used. :param single_user_name: str (optional) Single user name if data_security_mode is `SINGLE_USER` :param spark_conf: Dict[str,str] (optional) diff --git a/docs/workspace/dashboards/lakeview.rst b/docs/workspace/dashboards/lakeview.rst index 92aa8c0e3..fe358063c 100644 --- a/docs/workspace/dashboards/lakeview.rst +++ b/docs/workspace/dashboards/lakeview.rst @@ -20,7 +20,11 @@ slash. This field is excluded in List Dashboards responses. :param serialized_dashboard: str (optional) The contents of the dashboard in serialized string form. 
This field is excluded in List Dashboards - responses. + responses. Use the [get dashboard API] to retrieve an example response, which includes the + `serialized_dashboard` field. This field provides the structure of the JSON string that represents + the dashboard's layout and components. + + [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get :param warehouse_id: str (optional) The warehouse ID used to run the dashboard. @@ -261,7 +265,11 @@ not been modified since the last read. This field is excluded in List Dashboards responses. :param serialized_dashboard: str (optional) The contents of the dashboard in serialized string form. This field is excluded in List Dashboards - responses. + responses. Use the [get dashboard API] to retrieve an example response, which includes the + `serialized_dashboard` field. This field provides the structure of the JSON string that represents + the dashboard's layout and components. + + [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get :param warehouse_id: str (optional) The warehouse ID used to run the dashboard. diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index c07c8e28e..3c6e0f2e4 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -174,7 +174,10 @@ An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. :param environments: List[:class:`JobEnvironment`] (optional) - A list of task execution environment specifications that can be referenced by tasks of this job. + A list of task execution environment specifications that can be referenced by serverless tasks of + this job. An environment is required to be present for serverless tasks. For serverless notebook + tasks, the environment is accessible in the notebook environment panel. For other serverless tasks, + the task environment is required to be specified using environment_key in the task settings. 
:param format: :class:`Format` (optional) Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `"MULTI_TASK"`. @@ -211,12 +214,11 @@ :param queue: :class:`QueueSettings` (optional) The queue settings of the job. :param run_as: :class:`JobRunAs` (optional) - Write-only setting, available only in Create/Update/Reset and Submit calls. Specifies the user or - service principal that the job runs as. If not specified, the job runs as the user who created the - job. + Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If + not specified, the job/pipeline runs as the user who created the job/pipeline. - Only `user_name` or `service_principal_name` can be specified. If both are specified, an error is - thrown. + Exactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an + error is thrown. :param schedule: :class:`CronSchedule` (optional) An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. @@ -679,6 +681,7 @@ [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) + Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. 
@@ -868,6 +871,7 @@ [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html :param pipeline_params: :class:`PipelineParams` (optional) + Controls whether the pipeline should perform a full refresh :param python_named_params: Dict[str,str] (optional) :param python_params: List[str] (optional) A list of parameters for jobs with Python tasks, for example `"python_params": ["john doe", "35"]`. diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index ce98ac5d4..9801a200e 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -15,7 +15,7 @@ also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected data quality and specify how to handle records that fail those expectations. - .. py:method:: create( [, allow_duplicate_names: Optional[bool], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse + .. 
py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse Usage: @@ -55,6 +55,8 @@ :param allow_duplicate_names: bool (optional) If false, deployment will fail if name conflicts with that of another pipeline. + :param budget_policy_id: str (optional) + Budget policy of this pipeline. :param catalog: str (optional) A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, @@ -91,6 +93,9 @@ List of notification settings for this pipeline. :param photon: bool (optional) Whether Photon is enabled for this pipeline. + :param schema: str (optional) + The default schema (database) where tables are read from or published to. The presence of this field + implies that the pipeline is in direct publishing mode. :param serverless: bool (optional) Whether serverless compute is enabled for this pipeline. :param storage: str (optional) @@ -371,7 +376,7 @@ .. py:method:: stop_and_wait(pipeline_id: str, timeout: datetime.timedelta = 0:20:00) -> GetPipelineResponse - .. 
py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) + .. py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) Usage: @@ -425,6 +430,8 @@ Unique identifier for this pipeline. :param allow_duplicate_names: bool (optional) If false, deployment will fail if name has changed and conflicts the name of another pipeline. + :param budget_policy_id: str (optional) + Budget policy of this pipeline. 
:param catalog: str (optional) A catalog in Unity Catalog to publish data from this pipeline to. If `target` is specified, tables in this pipeline are published to a `target` schema inside `catalog` (for example, @@ -463,6 +470,9 @@ List of notification settings for this pipeline. :param photon: bool (optional) Whether Photon is enabled for this pipeline. + :param schema: str (optional) + The default schema (database) where tables are read from or published to. The presence of this field + implies that the pipeline is in direct publishing mode. :param serverless: bool (optional) Whether serverless compute is enabled for this pipeline. :param storage: str (optional) diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index 9244f333a..8e21197a1 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -29,7 +29,7 @@ :returns: :class:`BuildLogsResponse` - .. py:method:: create(name: str, config: EndpointCoreConfigInput [, rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] + .. py:method:: create(name: str, config: EndpointCoreConfigInput [, ai_gateway: Optional[AiGatewayConfig], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] Create a new serving endpoint. @@ -38,9 +38,12 @@ workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. :param config: :class:`EndpointCoreConfigInput` The core config of the serving endpoint. + :param ai_gateway: :class:`AiGatewayConfig` (optional) + The AI Gateway configuration for the serving endpoint. NOTE: only external model endpoints are + supported as of now. :param rate_limits: List[:class:`RateLimit`] (optional) - Rate limits to be applied to the serving endpoint. 
NOTE: only external and foundation model - endpoints are supported as of now. + Rate limits to be applied to the serving endpoint. NOTE: this field is deprecated, please use AI + Gateway to manage rate limits. :param route_optimized: bool (optional) Enable route optimization for the serving endpoint. :param tags: List[:class:`EndpointTag`] (optional) @@ -51,7 +54,7 @@ See :method:wait_get_serving_endpoint_not_updating for more details. - .. py:method:: create_and_wait(name: str, config: EndpointCoreConfigInput [, rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. py:method:: create_and_wait(name: str, config: EndpointCoreConfigInput [, ai_gateway: Optional[AiGatewayConfig], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed .. py:method:: delete(name: str) @@ -168,8 +171,8 @@ Update rate limits of a serving endpoint. - Used to update the rate limits of a serving endpoint. NOTE: only external and foundation model - endpoints are supported as of now. + Used to update the rate limits of a serving endpoint. NOTE: Only foundation model endpoints are + currently supported. For external models, use AI Gateway to manage rate limits. :param name: str The name of the serving endpoint whose rate limits are being updated. This field is required. @@ -179,6 +182,29 @@ :returns: :class:`PutResponse` + .. py:method:: put_ai_gateway(name: str [, guardrails: Optional[AiGatewayGuardrails], inference_table_config: Optional[AiGatewayInferenceTableConfig], rate_limits: Optional[List[AiGatewayRateLimit]], usage_tracking_config: Optional[AiGatewayUsageTrackingConfig]]) -> PutAiGatewayResponse + + Update AI Gateway of a serving endpoint. + + Used to update the AI Gateway of a serving endpoint. 
NOTE: Only external model endpoints are currently + supported. + + :param name: str + The name of the serving endpoint whose AI Gateway is being updated. This field is required. + :param guardrails: :class:`AiGatewayGuardrails` (optional) + Configuration for AI Guardrails to prevent unwanted data and unsafe data in requests and responses. + :param inference_table_config: :class:`AiGatewayInferenceTableConfig` (optional) + Configuration for payload logging using inference tables. Use these tables to monitor and audit data + being sent to and received from model APIs and to improve model quality. + :param rate_limits: List[:class:`AiGatewayRateLimit`] (optional) + Configuration for rate limits which can be set to limit endpoint traffic. + :param usage_tracking_config: :class:`AiGatewayUsageTrackingConfig` (optional) + Configuration to enable usage tracking using system tables. These tables allow you to monitor + operational usage on endpoints and their associated costs. + + :returns: :class:`PutAiGatewayResponse` + + .. py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse Query a serving endpoint. diff --git a/docs/workspace/settings/disable_legacy_access.rst b/docs/workspace/settings/disable_legacy_access.rst new file mode 100644 index 000000000..c8baba3a7 --- /dev/null +++ b/docs/workspace/settings/disable_legacy_access.rst @@ -0,0 +1,61 @@ +``w.settings.disable_legacy_access``: Disable Legacy Access +=========================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. 
py:class:: DisableLegacyAccessAPI + + 'Disabling legacy access' has the following impacts: + + 1. Disables direct access to the Hive Metastore. However, you can still access Hive Metastore through HMS + Federation. 2. Disables Fallback Mode (docs link) on any External Location access from the workspace. 3. + Alters DBFS path access to use External Location permissions in place of legacy credentials. 4. Enforces + Unity Catalog access on all path based access. + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyAccessResponse + + Delete Legacy Access Disablement Status. + + Deletes legacy access disablement status. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDisableLegacyAccessResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyAccess + + Retrieve Legacy Access Disablement Status. + + Retrieves legacy access disablement Status. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DisableLegacyAccess` + + + .. 
py:method:: update(allow_missing: bool, setting: DisableLegacyAccess, field_mask: str) -> DisableLegacyAccess + + Update Legacy Access Disablement Status. + + Updates legacy access disablement status. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DisableLegacyAccess` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`DisableLegacyAccess` + \ No newline at end of file diff --git a/docs/workspace/settings/disable_legacy_dbfs.rst b/docs/workspace/settings/disable_legacy_dbfs.rst new file mode 100644 index 000000000..ad11fa606 --- /dev/null +++ b/docs/workspace/settings/disable_legacy_dbfs.rst @@ -0,0 +1,57 @@ +``w.settings.disable_legacy_dbfs``: Disable Legacy DBFS +======================================================= +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: DisableLegacyDbfsAPI + + When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new + mounts). When the setting is off, all DBFS functionality is enabled + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyDbfsResponse + + Delete the disable legacy DBFS setting. + + Deletes the disable legacy DBFS setting for a workspace, reverting back to the default. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. 
That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDisableLegacyDbfsResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> DisableLegacyDbfs + + Get the disable legacy DBFS setting. + + Gets the disable legacy DBFS setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DisableLegacyDbfs` + + + .. py:method:: update(allow_missing: bool, setting: DisableLegacyDbfs, field_mask: str) -> DisableLegacyDbfs + + Update the disable legacy DBFS setting. + + Updates the disable legacy DBFS setting for the workspace. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DisableLegacyDbfs` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as a single string. To specify + multiple fields in the field mask, use comma as the separator (no space). 
+ + :returns: :class:`DisableLegacyDbfs` + \ No newline at end of file diff --git a/docs/workspace/settings/index.rst b/docs/workspace/settings/index.rst index d513ea9fd..22655853b 100644 --- a/docs/workspace/settings/index.rst +++ b/docs/workspace/settings/index.rst @@ -14,6 +14,8 @@ Manage security settings for Accounts and Workspaces automatic_cluster_update compliance_security_profile default_namespace + disable_legacy_access + disable_legacy_dbfs enhanced_security_monitoring restrict_workspace_admins token_management diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index 55f47dae0..588031926 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -34,6 +34,22 @@ This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. + .. py:property:: disable_legacy_access + :type: DisableLegacyAccessAPI + + 'Disabling legacy access' has the following impacts: + + 1. Disables direct access to the Hive Metastore. However, you can still access Hive Metastore through HMS + Federation. 2. Disables Fallback Mode (docs link) on any External Location access from the workspace. 3. + Alters DBFS path access to use External Location permissions in place of legacy credentials. 4. Enforces + Unity Catalog access on all path based access. + + .. py:property:: disable_legacy_dbfs + :type: DisableLegacyDbfsAPI + + When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new + mounts). When the setting is off, all DBFS functionality is enabled + .. 
py:property:: enhanced_security_monitoring :type: EnhancedSecurityMonitoringAPI diff --git a/docs/workspace/sql/statement_execution.rst b/docs/workspace/sql/statement_execution.rst index 4d1337623..716fa4fdc 100644 --- a/docs/workspace/sql/statement_execution.rst +++ b/docs/workspace/sql/statement_execution.rst @@ -82,7 +82,9 @@ are approximate, occur server-side, and cannot account for things such as caller delays and network latency from caller to service. - The system will auto-close a statement after one hour if the client stops polling and thus you must poll at least once an hour. - The results are only available for one hour - after success; polling does not extend this. + after success; polling does not extend this. - The SQL Execution API must be used for the entire lifecycle + of the statement. For example, you cannot use the Jobs API to execute the command, and then the SQL + Execution API to cancel it. [Apache Arrow Columnar]: https://arrow.apache.org/overview/ [Databricks SQL Statement Execution API tutorial]: https://docs.databricks.com/sql/api/sql-execution-tutorial.html diff --git a/docs/workspace/sql/warehouses.rst b/docs/workspace/sql/warehouses.rst index 8a5da4302..58b8a3fc0 100644 --- a/docs/workspace/sql/warehouses.rst +++ b/docs/workspace/sql/warehouses.rst @@ -41,7 +41,8 @@ The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins for + non-serverless warehouses - 0 indicates no autostop. 
Defaults to 120 mins :param channel: :class:`Channel` (optional) diff --git a/docs/workspace/workspace/git_credentials.rst b/docs/workspace/workspace/git_credentials.rst index 490cb16ea..34851e84a 100644 --- a/docs/workspace/workspace/git_credentials.rst +++ b/docs/workspace/workspace/git_credentials.rst @@ -33,9 +33,9 @@ existing credentials, or the DELETE endpoint to delete existing credentials. :param git_provider: str - Git provider. This field is case-insensitive. The available Git providers are gitHub, - bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer, - gitLabEnterpriseEdition and awsCodeCommit. + Git provider. This field is case-insensitive. The available Git providers are `gitHub`, + `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, + `gitLabEnterpriseEdition` and `awsCodeCommit`. :param git_username: str (optional) The username or email provided with your Git provider account, depending on which provider you are using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may @@ -44,8 +44,7 @@ Access Token authentication documentation to see what is supported. :param personal_access_token: str (optional) The personal access token used to authenticate to the corresponding Git provider. For certain - providers, support may exist for other types of scoped access tokens. [Learn more]. The personal - access token used to authenticate to the corresponding Git + providers, support may exist for other types of scoped access tokens. [Learn more]. [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html @@ -64,7 +63,7 @@ - .. py:method:: get(credential_id: int) -> CredentialInfo + .. py:method:: get(credential_id: int) -> GetCredentialsResponse Usage: @@ -89,7 +88,7 @@ :param credential_id: int The ID for the corresponding credential to access. - :returns: :class:`CredentialInfo` + :returns: :class:`GetCredentialsResponse` .. 
py:method:: list() -> Iterator[CredentialInfo] @@ -112,7 +111,7 @@ :returns: Iterator over :class:`CredentialInfo` - .. py:method:: update(credential_id: int [, git_provider: Optional[str], git_username: Optional[str], personal_access_token: Optional[str]]) + .. py:method:: update(credential_id: int, git_provider: str [, git_username: Optional[str], personal_access_token: Optional[str]]) Usage: @@ -141,10 +140,10 @@ :param credential_id: int The ID for the corresponding credential to access. - :param git_provider: str (optional) - Git provider. This field is case-insensitive. The available Git providers are gitHub, - bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer, - gitLabEnterpriseEdition and awsCodeCommit. + :param git_provider: str + Git provider. This field is case-insensitive. The available Git providers are `gitHub`, + `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, + `gitLabEnterpriseEdition` and `awsCodeCommit`. :param git_username: str (optional) The username or email provided with your Git provider account, depending on which provider you are using. For GitHub, GitHub Enterprise Server, or Azure DevOps Services, either email or username may @@ -153,8 +152,7 @@ Access Token authentication documentation to see what is supported. :param personal_access_token: str (optional) The personal access token used to authenticate to the corresponding Git provider. For certain - providers, support may exist for other types of scoped access tokens. [Learn more]. The personal - access token used to authenticate to the corresponding Git + providers, support may exist for other types of scoped access tokens. [Learn more]. 
[Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html diff --git a/docs/workspace/workspace/repos.rst b/docs/workspace/workspace/repos.rst index a5c602a3a..01b1c875f 100644 --- a/docs/workspace/workspace/repos.rst +++ b/docs/workspace/workspace/repos.rst @@ -14,7 +14,7 @@ Within Repos you can develop code in notebooks or other files and follow data science and engineering code development best practices using Git for version control, collaboration, and CI/CD. - .. py:method:: create(url: str, provider: str [, path: Optional[str], sparse_checkout: Optional[SparseCheckout]]) -> RepoInfo + .. py:method:: create(url: str, provider: str [, path: Optional[str], sparse_checkout: Optional[SparseCheckout]]) -> CreateRepoResponse Usage: @@ -42,17 +42,17 @@ :param url: str URL of the Git repository to be linked. :param provider: str - Git provider. This field is case-insensitive. The available Git providers are gitHub, - bitbucketCloud, gitLab, azureDevOpsServices, gitHubEnterprise, bitbucketServer, - gitLabEnterpriseEdition and awsCodeCommit. + Git provider. This field is case-insensitive. The available Git providers are `gitHub`, + `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, `gitHubEnterprise`, `bitbucketServer`, + `gitLabEnterpriseEdition` and `awsCodeCommit`. :param path: str (optional) Desired path for the repo in the workspace. Almost any path in the workspace can be chosen. If repo - is created in /Repos, path must be in the format /Repos/{folder}/{repo-name}. + is created in `/Repos`, path must be in the format `/Repos/{folder}/{repo-name}`. :param sparse_checkout: :class:`SparseCheckout` (optional) If specified, the repo will be created with sparse checkout enabled. You cannot enable/disable sparse checkout after the repo is created. - :returns: :class:`RepoInfo` + :returns: :class:`CreateRepoResponse` .. py:method:: delete(repo_id: int) @@ -62,12 +62,12 @@ Deletes the specified repo. 
:param repo_id: int - The ID for the corresponding repo to access. + ID of the Git folder (repo) object in the workspace. - .. py:method:: get(repo_id: int) -> RepoInfo + .. py:method:: get(repo_id: int) -> GetRepoResponse Usage: @@ -94,9 +94,9 @@ Returns the repo with the given repo ID. :param repo_id: int - The ID for the corresponding repo to access. + ID of the Git folder (repo) object in the workspace. - :returns: :class:`RepoInfo` + :returns: :class:`GetRepoResponse` .. py:method:: get_permission_levels(repo_id: str) -> GetRepoPermissionLevelsResponse @@ -139,15 +139,16 @@ Get repos. - Returns repos that the calling user has Manage permissions on. Results are paginated with each page - containing twenty repos. + Returns repos that the calling user has Manage permissions on. Use `next_page_token` to iterate + through additional pages. :param next_page_token: str (optional) Token used to get the next page of results. If not specified, returns the first page of results as well as a next page token if there are more results. :param path_prefix: str (optional) - Filters repos that have paths starting with the given path prefix. If not provided repos from /Repos - will be served. + Filters repos that have paths starting with the given path prefix. If not provided or when provided + an effectively empty prefix (`/` or `/Workspace`) Git folders (repos) from `/Workspace/Repos` will + be served. :returns: Iterator over :class:`RepoInfo` @@ -193,7 +194,7 @@ branch. :param repo_id: int - The ID for the corresponding repo to access. + ID of the Git folder (repo) object in the workspace. :param branch: str (optional) Branch that the local version of the repo is checked out to. :param sparse_checkout: :class:`SparseCheckoutUpdate` (optional)