From 300c221a7cc24a5ba6b0b212bcfca7bda5aefda1 Mon Sep 17 00:00:00 2001 From: Omer Lachish Date: Tue, 2 Sep 2025 10:49:59 +0200 Subject: [PATCH 1/2] Fix pytest-xdist serialization issues in nightly tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pin pytest-xdist to version >=3.6.1,<4.0 to resolve ExceptionInfo serialization errors that were causing test failures in the nightly integration tests. The error occurred when pytest-xdist tried to serialize exception information between worker processes. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 60c33f0e6..b1314929a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ Documentation = "https://databricks-sdk-py.readthedocs.io" dev = [ "pytest", "pytest-cov", - "pytest-xdist", + "pytest-xdist>=3.6.1,<4.0", "pytest-mock", "black", "pycodestyle", From e593f2aa5794bbc4343076ae335d4a7217b0f272 Mon Sep 17 00:00:00 2001 From: Omer Lachish Date: Tue, 2 Sep 2025 11:59:48 +0200 Subject: [PATCH 2/2] update sdk --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 13 ++ databricks/sdk/service/cleanrooms.py | 7 +- databricks/sdk/service/dashboards.py | 16 +- databricks/sdk/service/database.py | 20 +++ databricks/sdk/service/iam.py | 4 +- databricks/sdk/service/jobs.py | 22 ++- databricks/sdk/service/ml.py | 94 +++++------ databricks/sdk/service/pipelines.py | 130 ++++++++++++++ databricks/sdk/service/settings.py | 71 +++++++- databricks/sdk/service/vectorsearch.py | 72 +++++++- docs/account/iam/groups.rst | 4 +- docs/account/index.rst | 3 +- docs/account/settingsv2/index.rst | 10 ++ docs/account/settingsv2/settings_v2.rst | 46 +++++ docs/dbdataclasses/apps.rst | 101 +++++++++++ docs/dbdataclasses/catalog.rst | 134 ++++++++++++++- docs/dbdataclasses/dashboards.rst | 17 ++ docs/dbdataclasses/database.rst | 8 + docs/dbdataclasses/index.rst | 2 + docs/dbdataclasses/jobs.rst | 12 +- docs/dbdataclasses/ml.rst | 11 +- docs/dbdataclasses/pipelines.rst | 16 ++ docs/dbdataclasses/settingsv2.rst | 159 ++++++++++++++++++ docs/dbdataclasses/sql.rst | 4 + docs/dbdataclasses/tags.rst | 17 ++ docs/dbdataclasses/vectorsearch.rst | 8 + docs/gen-client-docs.py | 2 + docs/workspace/agentbricks/agent_bricks.rst | 6 +- docs/workspace/apps/apps_settings.rst | 60 +++++++ docs/workspace/apps/index.rst | 3 +- docs/workspace/catalog/connections.rst | 8 +- .../catalog/entity_tag_assignments.rst | 119 +++++++++++++ docs/workspace/catalog/index.rst | 4 + docs/workspace/catalog/policies.rst | 96 +++++++++++ docs/workspace/catalog/rfa.rst | 77 +++++++++ docs/workspace/catalog/schemas.rst | 2 +- docs/workspace/catalog/tables.rst | 51 +++++- .../catalog/temporary_path_credentials.rst | 51 ++++++ .../catalog/temporary_table_credentials.rst | 29 ++-- .../cleanrooms/clean_room_assets.rst | 11 +- docs/workspace/dashboards/genie.rst | 64 ++++++- docs/workspace/database/database.rst | 63 ++++++- docs/workspace/index.rst | 2 + docs/workspace/jobs/jobs.rst | 13 +- docs/workspace/serving/serving_endpoints.rst | 41 ++++- .../serving/serving_endpoints_data_plane.rst | 7 +- docs/workspace/settingsv2/index.rst | 10 ++ .../settingsv2/workspace_settings_v2.rst | 46 +++++ docs/workspace/tags/index.rst | 10 ++ docs/workspace/tags/tag_policies.rst | 68 ++++++++ .../vectorsearch/vector_search_endpoints.rst | 3 +- .../vectorsearch/vector_search_indexes.rst | 9 +- 53 files changed, 1732 
insertions(+), 126 deletions(-) create mode 100644 docs/account/settingsv2/index.rst create mode 100644 docs/account/settingsv2/settings_v2.rst create mode 100644 docs/dbdataclasses/settingsv2.rst create mode 100644 docs/dbdataclasses/tags.rst create mode 100644 docs/workspace/apps/apps_settings.rst create mode 100644 docs/workspace/catalog/entity_tag_assignments.rst create mode 100644 docs/workspace/catalog/policies.rst create mode 100644 docs/workspace/catalog/rfa.rst create mode 100644 docs/workspace/catalog/temporary_path_credentials.rst create mode 100644 docs/workspace/settingsv2/index.rst create mode 100644 docs/workspace/settingsv2/workspace_settings_v2.rst create mode 100644 docs/workspace/tags/index.rst create mode 100644 docs/workspace/tags/tag_policies.rst diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3461bf684..8af35ea49 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -6701524136501ef070774942ef5d6e01cfaafb88 \ No newline at end of file +b95c2c6e21bec9551ec7d7d51ddf2dfe390b4522 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 3e1502b75..2aef202c8 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,3 +13,16 @@ ### Internal Changes ### API Changes +* Added `comment` field for `databricks.sdk.service.dashboards.GenieSendMessageFeedbackRequest`. +* [Breaking] Added `rating` field for `databricks.sdk.service.dashboards.GenieSendMessageFeedbackRequest`. +* Added `effective_enable_pg_native_login` and `enable_pg_native_login` fields for `databricks.sdk.service.database.DatabaseInstance`. +* Added `task_retry_mode` field for `databricks.sdk.service.jobs.Continuous`. +* Added `source_configurations` field for `databricks.sdk.service.pipelines.IngestionPipelineDefinition`. +* Added `app_id`, `app_id_set`, `auth_secret`, `auth_secret_set`, `channel_url`, `channel_url_set`, `tenant_id` and `tenant_id_set` fields for `databricks.sdk.service.settings.MicrosoftTeamsConfig`. +* Added `ensure_reranker_compatible` field for `databricks.sdk.service.vectorsearch.GetIndexRequest`. +* Added `reranker` field for `databricks.sdk.service.vectorsearch.QueryVectorIndexRequest`. +* [Breaking] Changed `create_clean_room_asset_review()` method for [w.clean_room_assets](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/cleanrooms/clean_room_assets.html) workspace-level service with new required argument order. +* [Breaking] Changed `send_message_feedback()` method for [w.genie](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dashboards/genie.html) workspace-level service with new required argument order. +* Changed `notebook_review` field for `databricks.sdk.service.cleanrooms.CreateCleanRoomAssetReviewRequest` to no longer be required. +* [Breaking] Changed `features` field for `databricks.sdk.service.ml.FeatureList` to type list[`databricks.sdk.service.ml.LinkedFeature`] dataclass. +* [Breaking] Removed `feedback_rating` and `feedback_text` fields for `databricks.sdk.service.dashboards.GenieSendMessageFeedbackRequest`. 
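
The two Genie-related entries above combine a breaking rename with a new required-argument order; a minimal sketch of the new call shape follows, assuming a configured `WorkspaceClient` (the space, conversation, and message IDs are hypothetical placeholders):

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.dashboards import GenieFeedbackRating

w = WorkspaceClient()  # assumes default authentication is configured in the environment

# `rating` replaces the removed `feedback_rating` field and is now required;
# `comment` replaces `feedback_text` and remains optional (keyword-only).
w.genie.send_message_feedback(
    space_id="<space-id>",                # hypothetical placeholder
    conversation_id="<conversation-id>",  # hypothetical placeholder
    message_id="<message-id>",            # hypothetical placeholder
    rating=GenieFeedbackRating.POSITIVE,
    comment="Accurate answer and useful generated SQL.",
)
```

The new `reranker` field on `QueryVectorIndexRequest` is likewise easiest to see in a query call; this is a sketch only, with a placeholder index name and reranker model identifier:

```python
from databricks.sdk.service.vectorsearch import (
    RerankerConfig,
    RerankerConfigRerankerParameters,
)

# Hypothetical index name and reranker model; assumes the `w` client above.
results = w.vector_search_indexes.query_index(
    index_name="main.default.docs_index",
    columns=["id", "text"],
    query_text="how do I rotate storage credentials?",
    reranker=RerankerConfig(
        model="<reranker-model>",
        parameters=RerankerConfigRerankerParameters(columns_to_rerank=["text"]),
    ),
)
```
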
diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index 3840c040c..57ea7e961 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -1509,17 +1509,18 @@ def create_clean_room_asset_review( clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, - notebook_review: NotebookVersionReview, + *, + notebook_review: Optional[NotebookVersionReview] = None, ) -> CreateCleanRoomAssetReviewResponse: """Submit an asset review :param clean_room_name: str Name of the clean room :param asset_type: :class:`CleanRoomAssetAssetType` - Asset type. Can only be NOTEBOOK_FILE. + Asset type. Can either be NOTEBOOK_FILE or JAR_ANALYSIS. :param name: str Name of the asset - :param notebook_review: :class:`NotebookVersionReview` + :param notebook_review: :class:`NotebookVersionReview` (optional) :returns: :class:`CreateCleanRoomAssetReviewResponse` """ diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 3e13d2e4b..b71f2c866 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -1975,9 +1975,9 @@ def send_message_feedback( space_id: str, conversation_id: str, message_id: str, - feedback_rating: GenieFeedbackRating, + rating: GenieFeedbackRating, *, - feedback_text: Optional[str] = None, + comment: Optional[str] = None, ): """Send feedback for a message. @@ -1987,18 +1987,18 @@ def send_message_feedback( The ID associated with the conversation. :param message_id: str The ID associated with the message to provide feedback for. - :param feedback_rating: :class:`GenieFeedbackRating` + :param rating: :class:`GenieFeedbackRating` The rating (POSITIVE, NEGATIVE, or NONE). - :param feedback_text: str (optional) + :param comment: str (optional) Optional text feedback that will be stored as a comment. """ body = {} - if feedback_rating is not None: - body["feedback_rating"] = feedback_rating.value - if feedback_text is not None: - body["feedback_text"] = feedback_text + if comment is not None: + body["comment"] = comment + if rating is not None: + body["rating"] = rating.value headers = { "Accept": "application/json", "Content-Type": "application/json", diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py index 810fc18d9..01c65349f 100755 --- a/databricks/sdk/service/database.py +++ b/databricks/sdk/service/database.py @@ -125,6 +125,13 @@ class DatabaseInstance: creator: Optional[str] = None """The email of the creator of the instance.""" + effective_enable_pg_native_login: Optional[bool] = None + """xref AIP-129. `enable_pg_native_login` is owned by the client, while + `effective_enable_pg_native_login` is owned by the server. `enable_pg_native_login` will only be + set in Create/Update response messages if and only if the user provides the field via the + request. `effective_enable_pg_native_login` on the other hand will always bet set in all + response messages (Create/Update/Get/List).""" + effective_enable_readable_secondaries: Optional[bool] = None """xref AIP-129. `enable_readable_secondaries` is owned by the client, while `effective_enable_readable_secondaries` is owned by the server. `enable_readable_secondaries` @@ -151,6 +158,9 @@ class DatabaseInstance: provides the field via the request. 
`effective_stopped` on the other hand will always bet set in all response messages (Create/Update/Get/List).""" + enable_pg_native_login: Optional[bool] = None + """Whether the instance has PG native password login enabled. Defaults to true.""" + enable_readable_secondaries: Optional[bool] = None """Whether to enable secondaries to serve read-only traffic. Defaults to false.""" @@ -197,6 +207,8 @@ def as_dict(self) -> dict: body["creation_time"] = self.creation_time if self.creator is not None: body["creator"] = self.creator + if self.effective_enable_pg_native_login is not None: + body["effective_enable_pg_native_login"] = self.effective_enable_pg_native_login if self.effective_enable_readable_secondaries is not None: body["effective_enable_readable_secondaries"] = self.effective_enable_readable_secondaries if self.effective_node_count is not None: @@ -205,6 +217,8 @@ def as_dict(self) -> dict: body["effective_retention_window_in_days"] = self.effective_retention_window_in_days if self.effective_stopped is not None: body["effective_stopped"] = self.effective_stopped + if self.enable_pg_native_login is not None: + body["enable_pg_native_login"] = self.enable_pg_native_login if self.enable_readable_secondaries is not None: body["enable_readable_secondaries"] = self.enable_readable_secondaries if self.name is not None: @@ -240,6 +254,8 @@ def as_shallow_dict(self) -> dict: body["creation_time"] = self.creation_time if self.creator is not None: body["creator"] = self.creator + if self.effective_enable_pg_native_login is not None: + body["effective_enable_pg_native_login"] = self.effective_enable_pg_native_login if self.effective_enable_readable_secondaries is not None: body["effective_enable_readable_secondaries"] = self.effective_enable_readable_secondaries if self.effective_node_count is not None: @@ -248,6 +264,8 @@ def as_shallow_dict(self) -> dict: body["effective_retention_window_in_days"] = self.effective_retention_window_in_days if self.effective_stopped is not None: body["effective_stopped"] = self.effective_stopped + if self.enable_pg_native_login is not None: + body["enable_pg_native_login"] = self.enable_pg_native_login if self.enable_readable_secondaries is not None: body["enable_readable_secondaries"] = self.enable_readable_secondaries if self.name is not None: @@ -280,10 +298,12 @@ def from_dict(cls, d: Dict[str, Any]) -> DatabaseInstance: child_instance_refs=_repeated_dict(d, "child_instance_refs", DatabaseInstanceRef), creation_time=d.get("creation_time", None), creator=d.get("creator", None), + effective_enable_pg_native_login=d.get("effective_enable_pg_native_login", None), effective_enable_readable_secondaries=d.get("effective_enable_readable_secondaries", None), effective_node_count=d.get("effective_node_count", None), effective_retention_window_in_days=d.get("effective_retention_window_in_days", None), effective_stopped=d.get("effective_stopped", None), + enable_pg_native_login=d.get("enable_pg_native_login", None), enable_readable_secondaries=d.get("enable_readable_secondaries", None), name=d.get("name", None), node_count=d.get("node_count", None), diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index 09166b04f..2991a890f 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2193,7 +2193,9 @@ def list( sort_order: Optional[ListSortOrder] = None, start_index: Optional[int] = None, ) -> Iterator[Group]: - """Gets all details of the groups associated with the Databricks account. 
+ """Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint + will not return members. Instead, members should be retrieved by iterating through `Get group + details`. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 96ee972bf..9f023448b 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -873,11 +873,16 @@ class Continuous: pause_status: Optional[PauseStatus] = None """Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED.""" + task_retry_mode: Optional[TaskRetryMode] = None + """Indicate whether the continuous job is applying task level retries or not. Defaults to NEVER.""" + def as_dict(self) -> dict: """Serializes the Continuous into a dictionary suitable for use as a JSON request body.""" body = {} if self.pause_status is not None: body["pause_status"] = self.pause_status.value + if self.task_retry_mode is not None: + body["task_retry_mode"] = self.task_retry_mode.value return body def as_shallow_dict(self) -> dict: @@ -885,12 +890,17 @@ def as_shallow_dict(self) -> dict: body = {} if self.pause_status is not None: body["pause_status"] = self.pause_status + if self.task_retry_mode is not None: + body["task_retry_mode"] = self.task_retry_mode return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> Continuous: """Deserializes the Continuous from a dictionary.""" - return cls(pause_status=_enum(d, "pause_status", PauseStatus)) + return cls( + pause_status=_enum(d, "pause_status", PauseStatus), + task_retry_mode=_enum(d, "task_retry_mode", TaskRetryMode), + ) @dataclass @@ -7891,6 +7901,16 @@ def from_dict(cls, d: Dict[str, Any]) -> TaskNotificationSettings: ) +class TaskRetryMode(Enum): + """task retry mode of the continuous job * NEVER: The failed task will not be retried. * + ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first + attempt. When this condition is no longer met or the retry limit is reached, the job run is + cancelled and a new run is started.""" + + NEVER = "NEVER" + ON_FAILURE = "ON_FAILURE" + + class TerminationCodeCode(Enum): """The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. 
* `SUCCESS_WITH_FAILURES`: The run diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 85165d73a..6ca964fd4 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -1210,51 +1210,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ExperimentTag: return cls(key=d.get("key", None), value=d.get("value", None)) -@dataclass -class Feature: - """Feature for model version.""" - - feature_name: Optional[str] = None - """Feature name""" - - feature_table_id: Optional[str] = None - """Feature table id""" - - feature_table_name: Optional[str] = None - """Feature table name""" - - def as_dict(self) -> dict: - """Serializes the Feature into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.feature_name is not None: - body["feature_name"] = self.feature_name - if self.feature_table_id is not None: - body["feature_table_id"] = self.feature_table_id - if self.feature_table_name is not None: - body["feature_table_name"] = self.feature_table_name - return body - - def as_shallow_dict(self) -> dict: - """Serializes the Feature into a shallow dictionary of its immediate attributes.""" - body = {} - if self.feature_name is not None: - body["feature_name"] = self.feature_name - if self.feature_table_id is not None: - body["feature_table_id"] = self.feature_table_id - if self.feature_table_name is not None: - body["feature_table_name"] = self.feature_table_name - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> Feature: - """Deserializes the Feature from a dictionary.""" - return cls( - feature_name=d.get("feature_name", None), - feature_table_id=d.get("feature_table_id", None), - feature_table_name=d.get("feature_table_name", None), - ) - - @dataclass class FeatureLineage: feature_specs: Optional[List[FeatureLineageFeatureSpec]] = None @@ -1391,7 +1346,7 @@ def from_dict(cls, d: Dict[str, Any]) -> FeatureLineageOnlineFeature: class FeatureList: """Feature list wrap all the features for a model version""" - features: Optional[List[Feature]] = None + features: Optional[List[LinkedFeature]] = None def as_dict(self) -> dict: """Serializes the FeatureList into a dictionary suitable for use as a JSON request body.""" @@ -1410,7 +1365,7 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> FeatureList: """Deserializes the FeatureList from a dictionary.""" - return cls(features=_repeated_dict(d, "features", Feature)) + return cls(features=_repeated_dict(d, "features", LinkedFeature)) @dataclass @@ -2054,6 +2009,51 @@ def from_dict(cls, d: Dict[str, Any]) -> JobSpecWithoutSecret: return cls(job_id=d.get("job_id", None), workspace_url=d.get("workspace_url", None)) +@dataclass +class LinkedFeature: + """Feature for model version. 
([ML-57150] Renamed from Feature to LinkedFeature)""" + + feature_name: Optional[str] = None + """Feature name""" + + feature_table_id: Optional[str] = None + """Feature table id""" + + feature_table_name: Optional[str] = None + """Feature table name""" + + def as_dict(self) -> dict: + """Serializes the LinkedFeature into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.feature_name is not None: + body["feature_name"] = self.feature_name + if self.feature_table_id is not None: + body["feature_table_id"] = self.feature_table_id + if self.feature_table_name is not None: + body["feature_table_name"] = self.feature_table_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the LinkedFeature into a shallow dictionary of its immediate attributes.""" + body = {} + if self.feature_name is not None: + body["feature_name"] = self.feature_name + if self.feature_table_id is not None: + body["feature_table_id"] = self.feature_table_id + if self.feature_table_name is not None: + body["feature_table_name"] = self.feature_table_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> LinkedFeature: + """Deserializes the LinkedFeature from a dictionary.""" + return cls( + feature_name=d.get("feature_name", None), + feature_table_id=d.get("feature_table_id", None), + feature_table_name=d.get("feature_table_name", None), + ) + + @dataclass class ListArtifactsResponse: files: Optional[List[FileInfo]] = None diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index d59dab0a5..a9e8ee11b 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -613,6 +613,9 @@ class IngestionPipelineDefinition: objects: Optional[List[IngestionConfig]] = None """Required. Settings specifying tables to replicate and the destination for the replicated tables.""" + source_configurations: Optional[List[SourceConfig]] = None + """Top-level source configurations""" + source_type: Optional[IngestionSourceType] = None """The type of the foreign source. The source type will be inferred from the source connection or ingestion gateway. 
This field is output only and will be ignored if provided.""" @@ -630,6 +633,8 @@ def as_dict(self) -> dict: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.objects: body["objects"] = [v.as_dict() for v in self.objects] + if self.source_configurations: + body["source_configurations"] = [v.as_dict() for v in self.source_configurations] if self.source_type is not None: body["source_type"] = self.source_type.value if self.table_configuration: @@ -645,6 +650,8 @@ def as_shallow_dict(self) -> dict: body["ingestion_gateway_id"] = self.ingestion_gateway_id if self.objects: body["objects"] = self.objects + if self.source_configurations: + body["source_configurations"] = self.source_configurations if self.source_type is not None: body["source_type"] = self.source_type if self.table_configuration: @@ -658,6 +665,7 @@ def from_dict(cls, d: Dict[str, Any]) -> IngestionPipelineDefinition: connection_name=d.get("connection_name", None), ingestion_gateway_id=d.get("ingestion_gateway_id", None), objects=_repeated_dict(d, "objects", IngestionConfig), + source_configurations=_repeated_dict(d, "source_configurations", SourceConfig), source_type=_enum(d, "source_type", IngestionSourceType), table_configuration=_from_dict(d, "table_configuration", TableSpecificConfig), ) @@ -2239,6 +2247,67 @@ def from_dict(cls, d: Dict[str, Any]) -> PipelinesEnvironment: return cls(dependencies=d.get("dependencies", None)) +@dataclass +class PostgresCatalogConfig: + """PG-specific catalog-level configuration parameters""" + + slot_config: Optional[PostgresSlotConfig] = None + """Optional. The Postgres slot configuration to use for logical replication""" + + def as_dict(self) -> dict: + """Serializes the PostgresCatalogConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.slot_config: + body["slot_config"] = self.slot_config.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PostgresCatalogConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.slot_config: + body["slot_config"] = self.slot_config + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PostgresCatalogConfig: + """Deserializes the PostgresCatalogConfig from a dictionary.""" + return cls(slot_config=_from_dict(d, "slot_config", PostgresSlotConfig)) + + +@dataclass +class PostgresSlotConfig: + """PostgresSlotConfig contains the configuration for a Postgres logical replication slot""" + + publication_name: Optional[str] = None + """The name of the publication to use for the Postgres source""" + + slot_name: Optional[str] = None + """The name of the logical replication slot to use for the Postgres source""" + + def as_dict(self) -> dict: + """Serializes the PostgresSlotConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.publication_name is not None: + body["publication_name"] = self.publication_name + if self.slot_name is not None: + body["slot_name"] = self.slot_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the PostgresSlotConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.publication_name is not None: + body["publication_name"] = self.publication_name + if self.slot_name is not None: + body["slot_name"] = self.slot_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> PostgresSlotConfig: + """Deserializes the PostgresSlotConfig from a dictionary.""" + return cls(publication_name=d.get("publication_name", 
None), slot_name=d.get("slot_name", None)) + + @dataclass class ReportSpec: source_url: str @@ -2527,6 +2596,67 @@ def from_dict(cls, d: Dict[str, Any]) -> SerializedException: ) +@dataclass +class SourceCatalogConfig: + """SourceCatalogConfig contains catalog-level custom configuration parameters for each source""" + + postgres: Optional[PostgresCatalogConfig] = None + """Postgres-specific catalog-level configuration parameters""" + + source_catalog: Optional[str] = None + """Source catalog name""" + + def as_dict(self) -> dict: + """Serializes the SourceCatalogConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.postgres: + body["postgres"] = self.postgres.as_dict() + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SourceCatalogConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.postgres: + body["postgres"] = self.postgres + if self.source_catalog is not None: + body["source_catalog"] = self.source_catalog + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SourceCatalogConfig: + """Deserializes the SourceCatalogConfig from a dictionary.""" + return cls( + postgres=_from_dict(d, "postgres", PostgresCatalogConfig), source_catalog=d.get("source_catalog", None) + ) + + +@dataclass +class SourceConfig: + catalog: Optional[SourceCatalogConfig] = None + """Catalog-level source configuration parameters""" + + def as_dict(self) -> dict: + """Serializes the SourceConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.catalog: + body["catalog"] = self.catalog.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SourceConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.catalog: + body["catalog"] = self.catalog + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SourceConfig: + """Deserializes the SourceConfig from a dictionary.""" + return cls(catalog=_from_dict(d, "catalog", SourceCatalogConfig)) + + @dataclass class StackFrame: declaring_class: Optional[str] = None diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index 385422529..3004f17da 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -3587,8 +3587,32 @@ def from_dict(cls, d: Dict[str, Any]) -> LlmProxyPartnerPoweredWorkspace: @dataclass class MicrosoftTeamsConfig: + app_id: Optional[str] = None + """[Input-Only] App ID for Microsoft Teams App.""" + + app_id_set: Optional[bool] = None + """[Output-Only] Whether App ID is set.""" + + auth_secret: Optional[str] = None + """[Input-Only] Secret for Microsoft Teams App authentication.""" + + auth_secret_set: Optional[bool] = None + """[Output-Only] Whether secret is set.""" + + channel_url: Optional[str] = None + """[Input-Only] Channel URL for Microsoft Teams App.""" + + channel_url_set: Optional[bool] = None + """[Output-Only] Whether Channel URL is set.""" + + tenant_id: Optional[str] = None + """[Input-Only] Tenant ID for Microsoft Teams App.""" + + tenant_id_set: Optional[bool] = None + """[Output-Only] Whether Tenant ID is set.""" + url: Optional[str] = None - """[Input-Only] URL for Microsoft Teams.""" + """[Input-Only] URL for Microsoft Teams webhook.""" url_set: Optional[bool] = None """[Output-Only] Whether URL is set.""" @@ -3596,6 +3620,22 @@ class MicrosoftTeamsConfig: def as_dict(self) 
-> dict: """Serializes the MicrosoftTeamsConfig into a dictionary suitable for use as a JSON request body.""" body = {} + if self.app_id is not None: + body["app_id"] = self.app_id + if self.app_id_set is not None: + body["app_id_set"] = self.app_id_set + if self.auth_secret is not None: + body["auth_secret"] = self.auth_secret + if self.auth_secret_set is not None: + body["auth_secret_set"] = self.auth_secret_set + if self.channel_url is not None: + body["channel_url"] = self.channel_url + if self.channel_url_set is not None: + body["channel_url_set"] = self.channel_url_set + if self.tenant_id is not None: + body["tenant_id"] = self.tenant_id + if self.tenant_id_set is not None: + body["tenant_id_set"] = self.tenant_id_set if self.url is not None: body["url"] = self.url if self.url_set is not None: @@ -3605,6 +3645,22 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the MicrosoftTeamsConfig into a shallow dictionary of its immediate attributes.""" body = {} + if self.app_id is not None: + body["app_id"] = self.app_id + if self.app_id_set is not None: + body["app_id_set"] = self.app_id_set + if self.auth_secret is not None: + body["auth_secret"] = self.auth_secret + if self.auth_secret_set is not None: + body["auth_secret_set"] = self.auth_secret_set + if self.channel_url is not None: + body["channel_url"] = self.channel_url + if self.channel_url_set is not None: + body["channel_url_set"] = self.channel_url_set + if self.tenant_id is not None: + body["tenant_id"] = self.tenant_id + if self.tenant_id_set is not None: + body["tenant_id_set"] = self.tenant_id_set if self.url is not None: body["url"] = self.url if self.url_set is not None: @@ -3614,7 +3670,18 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> MicrosoftTeamsConfig: """Deserializes the MicrosoftTeamsConfig from a dictionary.""" - return cls(url=d.get("url", None), url_set=d.get("url_set", None)) + return cls( + app_id=d.get("app_id", None), + app_id_set=d.get("app_id_set", None), + auth_secret=d.get("auth_secret", None), + auth_secret_set=d.get("auth_secret_set", None), + channel_url=d.get("channel_url", None), + channel_url_set=d.get("channel_url_set", None), + tenant_id=d.get("tenant_id", None), + tenant_id_set=d.get("tenant_id_set", None), + url=d.get("url", None), + url_set=d.get("url_set", None), + ) @dataclass diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index da89959a0..8e706ccd6 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -872,6 +872,60 @@ def from_dict(cls, d: Dict[str, Any]) -> QueryVectorIndexResponse: ) +@dataclass +class RerankerConfig: + model: Optional[str] = None + + parameters: Optional[RerankerConfigRerankerParameters] = None + + def as_dict(self) -> dict: + """Serializes the RerankerConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.model is not None: + body["model"] = self.model + if self.parameters: + body["parameters"] = self.parameters.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RerankerConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.model is not None: + body["model"] = self.model + if self.parameters: + body["parameters"] = self.parameters + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RerankerConfig: + """Deserializes the RerankerConfig from a dictionary.""" + return 
cls(model=d.get("model", None), parameters=_from_dict(d, "parameters", RerankerConfigRerankerParameters)) + + +@dataclass +class RerankerConfigRerankerParameters: + columns_to_rerank: Optional[List[str]] = None + + def as_dict(self) -> dict: + """Serializes the RerankerConfigRerankerParameters into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.columns_to_rerank: + body["columns_to_rerank"] = [v for v in self.columns_to_rerank] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the RerankerConfigRerankerParameters into a shallow dictionary of its immediate attributes.""" + body = {} + if self.columns_to_rerank: + body["columns_to_rerank"] = self.columns_to_rerank + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> RerankerConfigRerankerParameters: + """Deserializes the RerankerConfigRerankerParameters from a dictionary.""" + return cls(columns_to_rerank=d.get("columns_to_rerank", None)) + + @dataclass class ResultData: """Data returned in the query result.""" @@ -1468,7 +1522,8 @@ def update_endpoint_budget_policy( :param endpoint_name: str Name of the vector search endpoint :param budget_policy_id: str - The budget policy id to be applied + The budget policy id to be applied (hima-sheth) TODO: remove this once we've migrated to usage + policies :returns: :class:`PatchEndpointBudgetPolicyResponse` """ @@ -1608,20 +1663,27 @@ def delete_index(self, index_name: str): self._api.do("DELETE", f"/api/2.0/vector-search/indexes/{index_name}", headers=headers) - def get_index(self, index_name: str) -> VectorIndex: + def get_index(self, index_name: str, *, ensure_reranker_compatible: Optional[bool] = None) -> VectorIndex: """Get an index. :param index_name: str Name of the index + :param ensure_reranker_compatible: bool (optional) + If true, the URL returned for the index is guaranteed to be compatible with the reranker. Currently + this means we return the CP URL regardless of how the index is being accessed. If not set or set to + false, the URL may still be compatible with the reranker depending on what URL we return. :returns: :class:`VectorIndex` """ + query = {} + if ensure_reranker_compatible is not None: + query["ensure_reranker_compatible"] = ensure_reranker_compatible headers = { "Accept": "application/json", } - res = self._api.do("GET", f"/api/2.0/vector-search/indexes/{index_name}", headers=headers) + res = self._api.do("GET", f"/api/2.0/vector-search/indexes/{index_name}", query=query, headers=headers) return VectorIndex.from_dict(res) def list_indexes(self, endpoint_name: str, *, page_token: Optional[str] = None) -> Iterator[MiniVectorIndex]: @@ -1664,6 +1726,7 @@ def query_index( query_text: Optional[str] = None, query_type: Optional[str] = None, query_vector: Optional[List[float]] = None, + reranker: Optional[RerankerConfig] = None, score_threshold: Optional[float] = None, ) -> QueryVectorIndexResponse: """Query the specified vector index. @@ -1691,6 +1754,7 @@ def query_index( :param query_vector: List[float] (optional) Query vector. Required for Direct Vector Access Index and Delta Sync Index using self-managed vectors. + :param reranker: :class:`RerankerConfig` (optional) :param score_threshold: float (optional) Threshold for the approximate nearest neighbor search. Defaults to 0.0. 
@@ -1711,6 +1775,8 @@ def query_index( body["query_type"] = query_type if query_vector is not None: body["query_vector"] = [v for v in query_vector] + if reranker is not None: + body["reranker"] = reranker.as_dict() if score_threshold is not None: body["score_threshold"] = score_threshold headers = { diff --git a/docs/account/iam/groups.rst b/docs/account/iam/groups.rst index 47b5d6cbb..dbe024c2a 100644 --- a/docs/account/iam/groups.rst +++ b/docs/account/iam/groups.rst @@ -59,7 +59,9 @@ .. py:method:: list( [, attributes: Optional[str], count: Optional[int], excluded_attributes: Optional[str], filter: Optional[str], sort_by: Optional[str], sort_order: Optional[ListSortOrder], start_index: Optional[int]]) -> Iterator[Group] - Gets all details of the groups associated with the Databricks account. + Gets all details of the groups associated with the Databricks account. As of 08/22/2025, this endpoint + will not return members. Instead, members should be retrieved by iterating through `Get group + details`. :param attributes: str (optional) Comma-separated list of attributes to return in response. diff --git a/docs/account/index.rst b/docs/account/index.rst index c0deff236..c355016d1 100644 --- a/docs/account/index.rst +++ b/docs/account/index.rst @@ -12,4 +12,5 @@ These APIs are available from AccountClient iam/index oauth2/index provisioning/index - settings/index \ No newline at end of file + settings/index + settingsv2/index \ No newline at end of file diff --git a/docs/account/settingsv2/index.rst b/docs/account/settingsv2/index.rst new file mode 100644 index 000000000..5d16de84c --- /dev/null +++ b/docs/account/settingsv2/index.rst @@ -0,0 +1,10 @@ + +SettingsV2 +========== + +Manage admin settings + +.. toctree:: + :maxdepth: 1 + + settings_v2 \ No newline at end of file diff --git a/docs/account/settingsv2/settings_v2.rst b/docs/account/settingsv2/settings_v2.rst new file mode 100644 index 000000000..87cdbd619 --- /dev/null +++ b/docs/account/settingsv2/settings_v2.rst @@ -0,0 +1,46 @@ +``a.settings_v2``: AccountSettings.v2 +===================================== +.. currentmodule:: databricks.sdk.service.settingsv2 + +.. py:class:: AccountSettingsV2API + + APIs to manage account level settings + + .. py:method:: get_public_account_setting(name: str) -> Setting + + Get a setting value at account level + + :param name: str + + :returns: :class:`Setting` + + + .. py:method:: list_account_settings_metadata( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] + + List valid setting keys and metadata. These settings are available to referenced via [GET + /api/2.1/settings/{name}](#~1api~1account~1settingsv2~1getpublicaccountsetting) and [PATCH + /api/2.1/settings/{name}](#~1api~1account~1settingsv2~patchpublicaccountsetting) APIs + + :param page_size: int (optional) + The maximum number of settings to return. The service may return fewer than this value. If + unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 + will be coerced to 1000. + :param page_token: str (optional) + A page token, received from a previous `ListAccountSettingsMetadataRequest` call. Provide this to + retrieve the subsequent page. + + When paginating, all other parameters provided to `ListAccountSettingsMetadataRequest` must match + the call that provided the page token. + + :returns: Iterator over :class:`SettingsMetadata` + + + .. 
py:method:: patch_public_account_setting(name: str, setting: Setting) -> Setting + + Patch a setting value at account level + + :param name: str + :param setting: :class:`Setting` + + :returns: :class:`Setting` + \ No newline at end of file diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index bfed63efb..3a562163a 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -50,6 +50,99 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AppManifest + :members: + :undoc-members: + +.. autoclass:: AppManifestAppResourceJobSpec + :members: + :undoc-members: + +.. py:class:: AppManifestAppResourceJobSpecJobPermission + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_MANAGE_RUN + :value: "CAN_MANAGE_RUN" + + .. py:attribute:: CAN_VIEW + :value: "CAN_VIEW" + + .. py:attribute:: IS_OWNER + :value: "IS_OWNER" + +.. autoclass:: AppManifestAppResourceSecretSpec + :members: + :undoc-members: + +.. py:class:: AppManifestAppResourceSecretSpecSecretPermission + + Permission to grant on the secret scope. Supported permissions are: "READ", "WRITE", "MANAGE". + + .. py:attribute:: MANAGE + :value: "MANAGE" + + .. py:attribute:: READ + :value: "READ" + + .. py:attribute:: WRITE + :value: "WRITE" + +.. autoclass:: AppManifestAppResourceServingEndpointSpec + :members: + :undoc-members: + +.. py:class:: AppManifestAppResourceServingEndpointSpecServingEndpointPermission + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_QUERY + :value: "CAN_QUERY" + + .. py:attribute:: CAN_VIEW + :value: "CAN_VIEW" + +.. autoclass:: AppManifestAppResourceSpec + :members: + :undoc-members: + +.. autoclass:: AppManifestAppResourceSqlWarehouseSpec + :members: + :undoc-members: + +.. py:class:: AppManifestAppResourceSqlWarehouseSpecSqlWarehousePermission + + .. py:attribute:: CAN_MANAGE + :value: "CAN_MANAGE" + + .. py:attribute:: CAN_USE + :value: "CAN_USE" + + .. py:attribute:: IS_OWNER + :value: "IS_OWNER" + +.. autoclass:: AppManifestAppResourceUcSecurableSpec + :members: + :undoc-members: + +.. py:class:: AppManifestAppResourceUcSecurableSpecUcSecurablePermission + + .. py:attribute:: MANAGE + :value: "MANAGE" + + .. py:attribute:: READ_VOLUME + :value: "READ_VOLUME" + + .. py:attribute:: WRITE_VOLUME + :value: "WRITE_VOLUME" + +.. py:class:: AppManifestAppResourceUcSecurableSpecUcSecurableType + + .. py:attribute:: VOLUME + :value: "VOLUME" + .. autoclass:: AppPermission :members: :undoc-members: @@ -212,6 +305,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CustomTemplate + :members: + :undoc-members: + .. autoclass:: GetAppPermissionLevelsResponse :members: :undoc-members: @@ -223,3 +320,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: ListAppsResponse :members: :undoc-members: + +.. autoclass:: ListCustomTemplatesResponse + :members: + :undoc-members: diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 595a14ac7..ec7a54980 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -4,6 +4,10 @@ Unity Catalog These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.catalog`` module. .. py:currentmodule:: databricks.sdk.service.catalog +.. 
autoclass:: AccessRequestDestinations + :members: + :undoc-members: + .. autoclass:: AccountsMetastoreAssignment :members: :undoc-members: @@ -89,6 +93,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: BatchCreateAccessRequestsResponse + :members: + :undoc-members: + .. autoclass:: CancelRefreshResponse :members: :undoc-members: @@ -139,6 +147,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ColumnMaskOptions + :members: + :undoc-members: + .. autoclass:: ColumnRelationship :members: :undoc-members: @@ -293,6 +305,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateAccessRequest + :members: + :undoc-members: + +.. autoclass:: CreateAccessRequestResponse + :members: + :undoc-members: + .. autoclass:: CreateFunction :members: :undoc-members: @@ -535,6 +555,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeletePolicyResponse + :members: + :undoc-members: + .. autoclass:: DeleteRequestExternalLineage :members: :undoc-members: @@ -567,6 +591,23 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: DestinationType + + .. py:attribute:: EMAIL + :value: "EMAIL" + + .. py:attribute:: GENERIC_WEBHOOK + :value: "GENERIC_WEBHOOK" + + .. py:attribute:: MICROSOFT_TEAMS + :value: "MICROSOFT_TEAMS" + + .. py:attribute:: SLACK + :value: "SLACK" + + .. py:attribute:: URL + :value: "URL" + .. autoclass:: DisableResponse :members: :undoc-members: @@ -616,7 +657,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: EnvironmentSettings +.. autoclass:: EntityTagAssignment :members: :undoc-members: @@ -688,6 +729,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: FunctionArgument + :members: + :undoc-members: + .. autoclass:: FunctionDependency :members: :undoc-members: @@ -766,6 +811,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GenerateTemporaryPathCredentialResponse + :members: + :undoc-members: + .. autoclass:: GenerateTemporaryServiceCredentialAzureOptions :members: :undoc-members: @@ -834,6 +883,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListEntityTagAssignmentsResponse + :members: + :undoc-members: + .. autoclass:: ListExternalLineageRelationshipsResponse :members: :undoc-members: @@ -858,6 +911,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListPoliciesResponse + :members: + :undoc-members: + .. autoclass:: ListQuotasResponse :members: :undoc-members: @@ -890,6 +947,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: MatchColumn + :members: + :undoc-members: + .. py:class:: MatchType The artifact pattern matching type @@ -1054,6 +1115,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NotificationDestination + :members: + :undoc-members: + .. 
autoclass:: OnlineTable :members: :undoc-members: @@ -1150,6 +1215,17 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: OPTION_STRING :value: "OPTION_STRING" +.. py:class:: PathOperation + + .. py:attribute:: PATH_CREATE_TABLE + :value: "PATH_CREATE_TABLE" + + .. py:attribute:: PATH_READ + :value: "PATH_READ" + + .. py:attribute:: PATH_READ_WRITE + :value: "PATH_READ_WRITE" + .. autoclass:: PermissionsChange :members: :undoc-members: @@ -1158,10 +1234,37 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PolicyInfo + :members: + :undoc-members: + +.. py:class:: PolicyType + + .. py:attribute:: POLICY_TYPE_COLUMN_MASK + :value: "POLICY_TYPE_COLUMN_MASK" + + .. py:attribute:: POLICY_TYPE_ROW_FILTER + :value: "POLICY_TYPE_ROW_FILTER" + .. autoclass:: PrimaryKeyConstraint :members: :undoc-members: +.. autoclass:: Principal + :members: + :undoc-members: + +.. py:class:: PrincipalType + + .. py:attribute:: GROUP_PRINCIPAL + :value: "GROUP_PRINCIPAL" + + .. py:attribute:: SERVICE_PRINCIPAL + :value: "SERVICE_PRINCIPAL" + + .. py:attribute:: USER_PRINCIPAL + :value: "USER_PRINCIPAL" + .. py:class:: Privilege .. py:attribute:: ACCESS @@ -1363,10 +1466,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RowFilterOptions + :members: + :undoc-members: + .. autoclass:: SchemaInfo :members: :undoc-members: +.. autoclass:: Securable + :members: + :undoc-members: + .. py:class:: SecurableKind .. py:attribute:: TABLE_DB_STORAGE @@ -1553,6 +1664,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SecurablePermissions + :members: + :undoc-members: + .. py:class:: SecurableType The type of Unity Catalog securable. @@ -1608,6 +1723,23 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: VOLUME :value: "VOLUME" +.. py:class:: SpecialDestination + + .. py:attribute:: SPECIAL_DESTINATION_CATALOG_OWNER + :value: "SPECIAL_DESTINATION_CATALOG_OWNER" + + .. py:attribute:: SPECIAL_DESTINATION_CONNECTION_OWNER + :value: "SPECIAL_DESTINATION_CONNECTION_OWNER" + + .. py:attribute:: SPECIAL_DESTINATION_CREDENTIAL_OWNER + :value: "SPECIAL_DESTINATION_CREDENTIAL_OWNER" + + .. py:attribute:: SPECIAL_DESTINATION_EXTERNAL_LOCATION_OWNER + :value: "SPECIAL_DESTINATION_EXTERNAL_LOCATION_OWNER" + + .. py:attribute:: SPECIAL_DESTINATION_METASTORE_OWNER + :value: "SPECIAL_DESTINATION_METASTORE_OWNER" + .. autoclass:: SseEncryptionDetails :members: :undoc-members: diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 105bc108c..559da8424 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -37,10 +37,27 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: GenieFeedbackRating + + Feedback rating for Genie messages + + .. py:attribute:: NEGATIVE + :value: "NEGATIVE" + + .. py:attribute:: NONE + :value: "NONE" + + .. py:attribute:: POSITIVE + :value: "POSITIVE" + .. autoclass:: GenieGetMessageQueryResultResponse :members: :undoc-members: +.. autoclass:: GenieListConversationMessagesResponse + :members: + :undoc-members: + .. 
autoclass:: GenieListConversationsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/database.rst b/docs/dbdataclasses/database.rst index 534c9b0d0..bdf7a2161 100644 --- a/docs/dbdataclasses/database.rst +++ b/docs/dbdataclasses/database.rst @@ -77,6 +77,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListDatabaseCatalogsResponse + :members: + :undoc-members: + .. autoclass:: ListDatabaseInstanceRolesResponse :members: :undoc-members: @@ -85,6 +89,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListSyncedDatabaseTablesResponse + :members: + :undoc-members: + .. autoclass:: NewPipelineSpec :members: :undoc-members: diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index 4b769f812..4b1463f0b 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -24,7 +24,9 @@ Dataclasses qualitymonitorv2 serving settings + settingsv2 sharing sql + tags vectorsearch workspace \ No newline at end of file diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 644497acd..c56480399 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -976,9 +976,19 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: TaskRetryMode + + task retry mode of the continuous job * NEVER: The failed task will not be retried. * ON_FAILURE: Retry a failed task if at least one other task in the job is still running its first attempt. When this condition is no longer met or the retry limit is reached, the job run is cancelled and a new run is started. + + .. py:attribute:: NEVER + :value: "NEVER" + + .. py:attribute:: ON_FAILURE + :value: "ON_FAILURE" + .. py:class:: TerminationCodeCode - The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run was completed successfully but some child runs failed. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. 
* `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. + The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run was completed successfully but some child runs failed. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. 
* `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. * `BREAKING_CHANGE`: Run failed because of an intentional breaking change in Spark, but it will be retried with a mitigation config. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now .. py:attribute:: BUDGET_POLICY_LIMIT_EXCEEDED diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 55d11035a..6042988a1 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -231,10 +231,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: Feature - :members: - :undoc-members: - .. autoclass:: FeatureLineage :members: :undoc-members: @@ -352,6 +348,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: LinkedFeature + :members: + :undoc-members: + .. autoclass:: ListArtifactsResponse :members: :undoc-members: @@ -551,6 +551,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CONTINUOUS :value: "CONTINUOUS" + .. py:attribute:: SNAPSHOT + :value: "SNAPSHOT" + .. py:attribute:: TRIGGERED :value: "TRIGGERED" diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 3b017a455..9c0e96bf8 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -350,6 +350,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PostgresCatalogConfig + :members: + :undoc-members: + +.. autoclass:: PostgresSlotConfig + :members: + :undoc-members: + .. autoclass:: ReportSpec :members: :undoc-members: @@ -374,6 +382,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SourceCatalogConfig + :members: + :undoc-members: + +.. autoclass:: SourceConfig + :members: + :undoc-members: + .. 
autoclass:: StackFrame :members: :undoc-members: diff --git a/docs/dbdataclasses/settingsv2.rst b/docs/dbdataclasses/settingsv2.rst new file mode 100644 index 000000000..583fe9aec --- /dev/null +++ b/docs/dbdataclasses/settingsv2.rst @@ -0,0 +1,159 @@ +SettingsV2 +========== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.settingsv2`` module. + +.. py:currentmodule:: databricks.sdk.service.settingsv2 +.. autoclass:: AibiDashboardEmbeddingAccessPolicy + :members: + :undoc-members: + +.. py:class:: AibiDashboardEmbeddingAccessPolicyAccessPolicyType + + .. py:attribute:: ALLOW_ALL_DOMAINS + :value: "ALLOW_ALL_DOMAINS" + + .. py:attribute:: ALLOW_APPROVED_DOMAINS + :value: "ALLOW_APPROVED_DOMAINS" + + .. py:attribute:: DENY_ALL_DOMAINS + :value: "DENY_ALL_DOMAINS" + +.. autoclass:: AibiDashboardEmbeddingApprovedDomains + :members: + :undoc-members: + +.. autoclass:: BooleanMessage + :members: + :undoc-members: + +.. autoclass:: ClusterAutoRestartMessage + :members: + :undoc-members: + +.. autoclass:: ClusterAutoRestartMessageEnablementDetails + :members: + :undoc-members: + +.. autoclass:: ClusterAutoRestartMessageMaintenanceWindow + :members: + :undoc-members: + +.. py:class:: ClusterAutoRestartMessageMaintenanceWindowDayOfWeek + + .. py:attribute:: FRIDAY + :value: "FRIDAY" + + .. py:attribute:: MONDAY + :value: "MONDAY" + + .. py:attribute:: SATURDAY + :value: "SATURDAY" + + .. py:attribute:: SUNDAY + :value: "SUNDAY" + + .. py:attribute:: THURSDAY + :value: "THURSDAY" + + .. py:attribute:: TUESDAY + :value: "TUESDAY" + + .. py:attribute:: WEDNESDAY + :value: "WEDNESDAY" + +.. autoclass:: ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule + :members: + :undoc-members: + +.. py:class:: ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency + + .. py:attribute:: EVERY_WEEK + :value: "EVERY_WEEK" + + .. py:attribute:: FIRST_AND_THIRD_OF_MONTH + :value: "FIRST_AND_THIRD_OF_MONTH" + + .. py:attribute:: FIRST_OF_MONTH + :value: "FIRST_OF_MONTH" + + .. py:attribute:: FOURTH_OF_MONTH + :value: "FOURTH_OF_MONTH" + + .. py:attribute:: SECOND_AND_FOURTH_OF_MONTH + :value: "SECOND_AND_FOURTH_OF_MONTH" + + .. py:attribute:: SECOND_OF_MONTH + :value: "SECOND_OF_MONTH" + + .. py:attribute:: THIRD_OF_MONTH + :value: "THIRD_OF_MONTH" + +.. autoclass:: ClusterAutoRestartMessageMaintenanceWindowWindowStartTime + :members: + :undoc-members: + +.. autoclass:: DefaultDataSecurityModeMessage + :members: + :undoc-members: + +.. py:class:: DefaultDataSecurityModeMessageStatus + + .. py:attribute:: NOT_SET + :value: "NOT_SET" + + .. py:attribute:: SINGLE_USER + :value: "SINGLE_USER" + + .. py:attribute:: USER_ISOLATION + :value: "USER_ISOLATION" + +.. autoclass:: IntegerMessage + :members: + :undoc-members: + +.. autoclass:: ListAccountSettingsMetadataResponse + :members: + :undoc-members: + +.. autoclass:: ListWorkspaceSettingsMetadataResponse + :members: + :undoc-members: + +.. autoclass:: PersonalComputeMessage + :members: + :undoc-members: + +.. py:class:: PersonalComputeMessagePersonalComputeMessageEnum + + ON: Grants all users in all workspaces access to the Personal Compute default policy, allowing all users to create single-machine compute resources. 
DELEGATE: Moves access control for the Personal Compute default policy to individual workspaces and requires a workspace’s users or groups to be added to the ACLs of that workspace’s Personal Compute default policy before they will be able to create compute resources through that policy. + + .. py:attribute:: DELEGATE + :value: "DELEGATE" + + .. py:attribute:: ON + :value: "ON" + +.. autoclass:: RestrictWorkspaceAdminsMessage + :members: + :undoc-members: + +.. py:class:: RestrictWorkspaceAdminsMessageStatus + + .. py:attribute:: ALLOW_ALL + :value: "ALLOW_ALL" + + .. py:attribute:: RESTRICT_TOKENS_AND_JOB_RUN_AS + :value: "RESTRICT_TOKENS_AND_JOB_RUN_AS" + +.. autoclass:: Setting + :members: + :undoc-members: + +.. autoclass:: SettingsMetadata + :members: + :undoc-members: + +.. autoclass:: StringMessage + :members: + :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 8afa33192..3dd84f229 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -153,6 +153,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AlertV2RunAs + :members: + :undoc-members: + .. autoclass:: AlertV2Subscription :members: :undoc-members: diff --git a/docs/dbdataclasses/tags.rst b/docs/dbdataclasses/tags.rst new file mode 100644 index 000000000..23eb1d728 --- /dev/null +++ b/docs/dbdataclasses/tags.rst @@ -0,0 +1,17 @@ +Tags +==== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.tags`` module. + +.. py:currentmodule:: databricks.sdk.service.tags +.. autoclass:: ListTagPoliciesResponse + :members: + :undoc-members: + +.. autoclass:: TagPolicy + :members: + :undoc-members: + +.. autoclass:: Value + :members: + :undoc-members: diff --git a/docs/dbdataclasses/vectorsearch.rst b/docs/dbdataclasses/vectorsearch.rst index d68e083d9..b6250cc64 100644 --- a/docs/dbdataclasses/vectorsearch.rst +++ b/docs/dbdataclasses/vectorsearch.rst @@ -125,6 +125,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RerankerConfig + :members: + :undoc-members: + +.. autoclass:: RerankerConfigRerankerParameters + :members: + :undoc-members: + .. autoclass:: ResultData :members: :undoc-members: diff --git a/docs/gen-client-docs.py b/docs/gen-client-docs.py index a48d1a7ab..3db599ebe 100644 --- a/docs/gen-client-docs.py +++ b/docs/gen-client-docs.py @@ -238,6 +238,8 @@ class Generator: Package("sharing", "Delta Sharing", "Configure data sharing with Unity Catalog for providers, recipients, and shares"), Package("settings", "Settings", "Manage security settings for Accounts and Workspaces"), + Package("settingsv2", "SettingsV2", "Manage admin settings"), + Package("tags", "Tags", "Manage tag policies and tag assignments on workspace objects"), Package( "provisioning", "Provisioning", "Resource management for secure Databricks Workspace deployment, cross-account IAM roles, " + diff --git a/docs/workspace/agentbricks/agent_bricks.rst b/docs/workspace/agentbricks/agent_bricks.rst index ca017c49a..bb4e4891f 100644 --- a/docs/workspace/agentbricks/agent_bricks.rst +++ b/docs/workspace/agentbricks/agent_bricks.rst @@ -24,9 +24,9 @@ :param instructions: str Instructions for the custom LLM to follow :param agent_artifact_path: str (optional) - Optional: UC path for agent artifacts. 
If you are using a dataset that you only have read - permissions, please provide a destination path where you have write permissions. Please provide this - in catalog.schema format. + This will soon be deprecated!! Optional: UC path for agent artifacts. If you are using a dataset + that you only have read permissions, please provide a destination path where you have write + permissions. Please provide this in catalog.schema format. :param datasets: List[:class:`Dataset`] (optional) Datasets used for training and evaluating the model, not for inference. Currently, only 1 dataset is accepted. diff --git a/docs/workspace/apps/apps_settings.rst b/docs/workspace/apps/apps_settings.rst new file mode 100644 index 000000000..acea870cb --- /dev/null +++ b/docs/workspace/apps/apps_settings.rst @@ -0,0 +1,60 @@ +``w.apps_settings``: Apps Settings +================================== +.. currentmodule:: databricks.sdk.service.apps + +.. py:class:: AppsSettingsAPI + + Apps Settings manage the settings for the Apps service on a customer's Databricks instance. + + .. py:method:: create_custom_template(template: CustomTemplate) -> CustomTemplate + + Creates a custom template. + + :param template: :class:`CustomTemplate` + + :returns: :class:`CustomTemplate` + + + .. py:method:: delete_custom_template(name: str) -> CustomTemplate + + Deletes the custom template with the specified name. + + :param name: str + The name of the custom template. + + :returns: :class:`CustomTemplate` + + + .. py:method:: get_custom_template(name: str) -> CustomTemplate + + Gets the custom template with the specified name. + + :param name: str + The name of the custom template. + + :returns: :class:`CustomTemplate` + + + .. py:method:: list_custom_templates( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[CustomTemplate] + + Lists all custom templates in the workspace. + + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of custom templates. Requests first page if absent. + + :returns: Iterator over :class:`CustomTemplate` + + + .. py:method:: update_custom_template(name: str, template: CustomTemplate) -> CustomTemplate + + Updates the custom template with the specified name. Note that the template name cannot be updated. + + :param name: str + The name of the template. It must contain only alphanumeric characters, hyphens, underscores, and + whitespaces. It must be unique within the workspace. + :param template: :class:`CustomTemplate` + + :returns: :class:`CustomTemplate` + \ No newline at end of file diff --git a/docs/workspace/apps/index.rst b/docs/workspace/apps/index.rst index bd21c93a5..9e4c97176 100644 --- a/docs/workspace/apps/index.rst +++ b/docs/workspace/apps/index.rst @@ -7,4 +7,5 @@ Build custom applications on Databricks .. toctree:: :maxdepth: 1 - apps \ No newline at end of file + apps + apps_settings \ No newline at end of file diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index 75da50675..ec7f39be2 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -13,7 +13,7 @@ objects based on cloud storage. Users may create different types of connections with each connection having a unique set of configuration options to support credential management and other settings. - .. 
py:method:: create(name: str, connection_type: ConnectionType, options: Dict[str, str] [, comment: Optional[str], environment_settings: Optional[EnvironmentSettings], properties: Optional[Dict[str, str]], read_only: Optional[bool]]) -> ConnectionInfo + .. py:method:: create(name: str, connection_type: ConnectionType, options: Dict[str, str] [, comment: Optional[str], properties: Optional[Dict[str, str]], read_only: Optional[bool]]) -> ConnectionInfo Usage: @@ -54,8 +54,6 @@ A map of key-value properties attached to the securable. :param comment: str (optional) User-provided free-form text description. - :param environment_settings: :class:`EnvironmentSettings` (optional) - [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param properties: Dict[str,str] (optional) A map of key-value properties attached to the securable. :param read_only: bool (optional) @@ -148,7 +146,7 @@ :returns: Iterator over :class:`ConnectionInfo` - .. py:method:: update(name: str, options: Dict[str, str] [, environment_settings: Optional[EnvironmentSettings], new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo + .. py:method:: update(name: str, options: Dict[str, str] [, new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo Usage: @@ -191,8 +189,6 @@ Name of the connection. :param options: Dict[str,str] A map of key-value properties attached to the securable. - :param environment_settings: :class:`EnvironmentSettings` (optional) - [Create,Update:OPT] Connection environment settings as EnvironmentSettings object. :param new_name: str (optional) New name for the connection. :param owner: str (optional) diff --git a/docs/workspace/catalog/entity_tag_assignments.rst b/docs/workspace/catalog/entity_tag_assignments.rst new file mode 100644 index 000000000..fcba2bbae --- /dev/null +++ b/docs/workspace/catalog/entity_tag_assignments.rst @@ -0,0 +1,119 @@ +``w.entity_tag_assignments``: Entity Tag Assignments +==================================================== +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: EntityTagAssignmentsAPI + + Tags are attributes that include keys and optional values that you can use to organize and categorize + entities in Unity Catalog. Entity tagging is currently supported on catalogs, schemas, tables (including + views), columns, volumes. With these APIs, users can create, update, delete, and list tag assignments + across Unity Catalog entities + + .. py:method:: create(tag_assignment: EntityTagAssignment) -> EntityTagAssignment + + Creates a tag assignment for an Unity Catalog entity. + + To add tags to Unity Catalog entities, you must own the entity or have the following privileges: - + **APPLY TAG** on the entity - **USE SCHEMA** on the entity's parent schema - **USE CATALOG** on the + entity's parent catalog + + To add a governed tag to Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** + permission on the tag policy. See [Manage tag policy permissions]. + + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions + + :param tag_assignment: :class:`EntityTagAssignment` + + :returns: :class:`EntityTagAssignment` + + + .. py:method:: delete(entity_type: str, entity_name: str, tag_key: str) + + Deletes a tag assignment for an Unity Catalog entity by its key. 
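As a companion to the entity tag assignment API documented above, here is a minimal sketch of creating and then deleting a tag on a table. The `create` and `delete` signatures follow the documentation; the `EntityTagAssignment` field names (`entity_type`, `entity_name`, `tag_key`, `tag_value`) are assumptions inferred from the get/delete parameters, and the three-part table name is a placeholder.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import EntityTagAssignment

    w = WorkspaceClient()

    # Assumed dataclass fields; the entity name is a placeholder three-part table name.
    w.entity_tag_assignments.create(
        tag_assignment=EntityTagAssignment(
            entity_type="tables",
            entity_name="main.default.my_table",
            tag_key="cost_center",
            tag_value="finance",
        )
    )

    # Remove the same tag assignment by its key.
    w.entity_tag_assignments.delete(
        entity_type="tables",
        entity_name="main.default.my_table",
        tag_key="cost_center",
    )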
+ + To delete tags from Unity Catalog entities, you must own the entity or have the following privileges: + - **APPLY TAG** on the entity - **USE_SCHEMA** on the entity's parent schema - **USE_CATALOG** on the + entity's parent catalog + + To delete a governed tag from Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** + permission on the tag policy. See [Manage tag policy permissions]. + + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions + + :param entity_type: str + The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, + columns, volumes. + :param entity_name: str + The fully qualified name of the entity to which the tag is assigned + :param tag_key: str + Required. The key of the tag to delete + + + + + .. py:method:: get(entity_type: str, entity_name: str, tag_key: str) -> EntityTagAssignment + + Gets a tag assignment for an Unity Catalog entity by tag key. + + :param entity_type: str + The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, + columns, volumes. + :param entity_name: str + The fully qualified name of the entity to which the tag is assigned + :param tag_key: str + Required. The key of the tag + + :returns: :class:`EntityTagAssignment` + + + .. py:method:: list(entity_type: str, entity_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[EntityTagAssignment] + + List tag assignments for an Unity Catalog entity + + :param entity_type: str + The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, + columns, volumes. + :param entity_name: str + The fully qualified name of the entity to which the tag is assigned + :param max_results: int (optional) + Optional. Maximum number of tag assignments to return in a single page + :param page_token: str (optional) + Optional. Pagination token to retrieve the next page of results + + :returns: Iterator over :class:`EntityTagAssignment` + + + .. py:method:: update(entity_type: str, entity_name: str, tag_key: str, tag_assignment: EntityTagAssignment, update_mask: str) -> EntityTagAssignment + + Updates an existing tag assignment for an Unity Catalog entity. + + To update tags to Unity Catalog entities, you must own the entity or have the following privileges: - + **APPLY TAG** on the entity - **USE SCHEMA** on the entity's parent schema - **USE CATALOG** on the + entity's parent catalog + + To update a governed tag to Unity Catalog entities, you must also have the **ASSIGN** or **MANAGE** + permission on the tag policy. See [Manage tag policy permissions]. + + [Manage tag policy permissions]: https://docs.databricks.com/aws/en/admin/tag-policies/manage-permissions + + :param entity_type: str + The type of the entity to which the tag is assigned. Allowed values are: catalogs, schemas, tables, + columns, volumes. + :param entity_name: str + The fully qualified name of the entity to which the tag is assigned + :param tag_key: str + The key of the tag + :param tag_assignment: :class:`EntityTagAssignment` + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. 
Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`EntityTagAssignment` + \ No newline at end of file diff --git a/docs/workspace/catalog/index.rst b/docs/workspace/catalog/index.rst index 1a84c4e74..1cdb8fc69 100644 --- a/docs/workspace/catalog/index.rst +++ b/docs/workspace/catalog/index.rst @@ -11,6 +11,7 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, catalogs connections credentials + entity_tag_assignments external_lineage external_locations external_metadata @@ -19,14 +20,17 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, metastores model_versions online_tables + policies quality_monitors registered_models resource_quotas + rfa schemas storage_credentials system_schemas table_constraints tables + temporary_path_credentials temporary_table_credentials volumes workspace_bindings \ No newline at end of file diff --git a/docs/workspace/catalog/policies.rst b/docs/workspace/catalog/policies.rst new file mode 100644 index 000000000..2eb9e6a99 --- /dev/null +++ b/docs/workspace/catalog/policies.rst @@ -0,0 +1,96 @@ +``w.policies``: ABAC Policies +============================= +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: PoliciesAPI + + Attribute-Based Access Control (ABAC) provides high leverage governance for enforcing compliance policies + in Unity Catalog. With ABAC policies, access is controlled in a hierarchical and scalable manner, based on + data attributes rather than specific resources, enabling more flexible and comprehensive access control. + ABAC policies in Unity Catalog support conditions on securable properties, governance tags, and + environment contexts. Callers must have the `MANAGE` privilege on a securable to view, create, update, or + delete ABAC policies. + + .. py:method:: create_policy(policy_info: PolicyInfo) -> PolicyInfo + + Creates a new policy on a securable. The new policy applies to the securable and all its descendants. + + :param policy_info: :class:`PolicyInfo` + Required. The policy to create. + + :returns: :class:`PolicyInfo` + + + .. py:method:: delete_policy(on_securable_type: str, on_securable_fullname: str, name: str) -> DeletePolicyResponse + + Delete an ABAC policy defined on a securable. + + :param on_securable_type: str + Required. The type of the securable to delete the policy from. + :param on_securable_fullname: str + Required. The fully qualified name of the securable to delete the policy from. + :param name: str + Required. The name of the policy to delete + + :returns: :class:`DeletePolicyResponse` + + + .. py:method:: get_policy(on_securable_type: str, on_securable_fullname: str, name: str) -> PolicyInfo + + Get the policy definition on a securable + + :param on_securable_type: str + Required. The type of the securable to retrieve the policy for. + :param on_securable_fullname: str + Required. The fully qualified name of securable to retrieve policy for. + :param name: str + Required. The name of the policy to retrieve. + + :returns: :class:`PolicyInfo` + + + .. 
py:method:: list_policies(on_securable_type: str, on_securable_fullname: str [, include_inherited: Optional[bool], max_results: Optional[int], page_token: Optional[str]]) -> Iterator[PolicyInfo] + + List all policies defined on a securable. Optionally, the list can include inherited policies defined + on the securable's parent schema or catalog. + + :param on_securable_type: str + Required. The type of the securable to list policies for. + :param on_securable_fullname: str + Required. The fully qualified name of securable to list policies for. + :param include_inherited: bool (optional) + Optional. Whether to include policies defined on parent securables. By default, the inherited + policies are not included. + :param max_results: int (optional) + Optional. Maximum number of policies to return on a single page (page length). - When not set or set + to 0, the page length is set to a server configured value (recommended); - When set to a value + greater than 0, the page length is the minimum of this value and a server configured value; + :param page_token: str (optional) + Optional. Opaque pagination token to go to next page based on previous query. + + :returns: Iterator over :class:`PolicyInfo` + + + .. py:method:: update_policy(on_securable_type: str, on_securable_fullname: str, name: str, policy_info: PolicyInfo [, update_mask: Optional[str]]) -> PolicyInfo + + Update an ABAC policy on a securable. + + :param on_securable_type: str + Required. The type of the securable to update the policy for. + :param on_securable_fullname: str + Required. The fully qualified name of the securable to update the policy for. + :param name: str + Required. The name of the policy to update. + :param policy_info: :class:`PolicyInfo` + Optional fields to update. This is the request body for updating a policy. Use `update_mask` field + to specify which fields in the request is to be updated. - If `update_mask` is empty or "*", all + specified fields will be updated. - If `update_mask` is specified, only the fields specified in the + `update_mask` will be updated. If a field is specified in `update_mask` and not set in the request, + the field will be cleared. Users can use the update mask to explicitly unset optional fields such as + `exception_principals` and `when_condition`. + :param update_mask: str (optional) + Optional. The update mask field for specifying user intentions on which fields to update in the + request. + + :returns: :class:`PolicyInfo` + \ No newline at end of file diff --git a/docs/workspace/catalog/rfa.rst b/docs/workspace/catalog/rfa.rst new file mode 100644 index 000000000..3019403bb --- /dev/null +++ b/docs/workspace/catalog/rfa.rst @@ -0,0 +1,77 @@ +``w.rfa``: Request for Access +============================= +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: RfaAPI + + Request for Access enables customers to request access to and manage access request destinations for Unity + Catalog securables. + + These APIs provide a standardized way to update, get, and request to access request destinations. + Fine-grained authorization ensures that only users with appropriate permissions can manage access request + destinations. + + .. py:method:: batch_create_access_requests( [, requests: Optional[List[CreateAccessRequest]]]) -> BatchCreateAccessRequestsResponse + + Creates access requests for Unity Catalog permissions for a specified principal on a securable object. 
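Following the ABAC policy methods documented above, a brief illustrative sketch of listing the policies on a table, including inherited ones. The securable type string and three-part name are placeholders, and the example assumes `PolicyInfo` exposes a `name` field, as the get/delete parameters suggest.

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # List policies defined on a table, including those inherited from its schema or catalog.
    for policy in w.policies.list_policies(
        on_securable_type="table",
        on_securable_fullname="main.default.my_table",  # placeholder securable
        include_inherited=True,
    ):
        print(policy.name)  # assumed PolicyInfo field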
+ This Batch API can take in multiple principals, securable objects, and permissions as the input and + returns the access request destinations for each. Principals must be unique across the API call. + + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", + "connection", "credential", "function", "registered_model", and "volume". + + :param requests: List[:class:`CreateAccessRequest`] (optional) + A list of individual access requests, where each request corresponds to a set of permissions being + requested on a list of securables for a specified principal. + + At most 30 requests per API call. + + :returns: :class:`BatchCreateAccessRequestsResponse` + + + .. py:method:: get_access_request_destinations(securable_type: str, full_name: str) -> AccessRequestDestinations + + Gets an array of access request destinations for the specified securable. Any caller can see URL + destinations or the destinations on the metastore. Otherwise, only those with **BROWSE** permissions + on the securable can see destinations. + + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", + "connection", "credential", "function", "registered_model", and "volume". + + :param securable_type: str + The type of the securable. + :param full_name: str + The full name of the securable. + + :returns: :class:`AccessRequestDestinations` + + + .. py:method:: update_access_request_destinations(access_request_destinations: AccessRequestDestinations, update_mask: str) -> AccessRequestDestinations + + Updates the access request destinations for the given securable. The caller must be a metastore admin, + the owner of the securable, or a user that has the **MANAGE** privilege on the securable in order to + assign destinations. Destinations cannot be updated for securables underneath schemas (tables, + volumes, functions, and models). For these securable types, destinations are inherited from the parent + securable. A maximum of 5 emails and 5 external notification destinations (Slack, Microsoft Teams, and + Generic Webhook destinations) can be assigned to a securable. If a URL destination is assigned, no + other destinations can be set. + + The supported securable types are: "metastore", "catalog", "schema", "table", "external_location", + "connection", "credential", "function", "registered_model", and "volume". + + :param access_request_destinations: :class:`AccessRequestDestinations` + The access request destinations to assign to the securable. For each destination, a + **destination_id** and **destination_type** must be defined. + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. 
+ + :returns: :class:`AccessRequestDestinations` + \ No newline at end of file diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index 60a4eb79d..fd1d323a1 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -30,7 +30,7 @@ w.catalogs.delete(name=created_catalog.name, force=True) w.schemas.delete(full_name=created_schema.full_name) - Creates a new schema for catalog in the Metatastore. The caller must be a metastore admin, or have the + Creates a new schema for catalog in the Metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog. :param name: str diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index efeea33f6..c4b6dad3d 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -13,6 +13,45 @@ A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table). + .. py:method:: create(name: str, catalog_name: str, schema_name: str, table_type: TableType, data_source_format: DataSourceFormat, storage_location: str [, columns: Optional[List[ColumnInfo]], properties: Optional[Dict[str, str]]]) -> TableInfo + + Creates a new table in the specified catalog and schema. + + To create an external delta table, the caller must have the **EXTERNAL_USE_SCHEMA** privilege on the + parent schema and the **EXTERNAL_USE_LOCATION** privilege on the external location. These privileges + must always be granted explicitly, and cannot be inherited through ownership or **ALL_PRIVILEGES**. + + Standard UC permissions needed to create tables still apply: **USE_CATALOG** on the parent catalog (or + ownership of the parent catalog), **CREATE_TABLE** and **USE_SCHEMA** on the parent schema (or + ownership of the parent schema), and **CREATE_EXTERNAL_TABLE** on external location. + + The **columns** field needs to be in a Spark compatible format, so we recommend you use Spark to + create these tables. The API itself does not validate the correctness of the column spec. If the spec + is not Spark compatible, the tables may not be readable by Databricks Runtime. + + NOTE: The Create Table API for external clients only supports creating **external delta tables**. The + values shown in the respective enums are all values supported by Databricks, however for this specific + Create Table API, only **table_type** **EXTERNAL** and **data_source_format** **DELTA** are supported. + Additionally, column masks are not supported when creating tables through this API. + + :param name: str + Name of table, relative to parent schema. + :param catalog_name: str + Name of parent catalog. + :param schema_name: str + Name of parent schema relative to its parent catalog. + :param table_type: :class:`TableType` + :param data_source_format: :class:`DataSourceFormat` + :param storage_location: str + Storage root URL for table (for **MANAGED**, **EXTERNAL** tables). + :param columns: List[:class:`ColumnInfo`] (optional) + The array of __ColumnInfo__ definitions of the table's columns. + :param properties: Dict[str,str] (optional) + A map of key-value properties attached to the securable. + + :returns: :class:`TableInfo` + + .. py:method:: delete(full_name: str) Deletes a table from the specified parent catalog and schema. The caller must be the owner of the @@ -30,10 +69,10 @@ Gets if a table exists in the metastore for a specific catalog and schema. 
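To illustrate the new Tables create API documented above, a hedged sketch of registering an external Delta table (the only combination this endpoint supports). The storage URL and names are placeholders, and the single `ColumnInfo` entry is illustrative only, since the API does not validate the column spec.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import (ColumnInfo, ColumnTypeName,
                                                DataSourceFormat, TableType)

    w = WorkspaceClient()

    table = w.tables.create(
        name="events",
        catalog_name="main",
        schema_name="default",
        table_type=TableType.EXTERNAL,              # only EXTERNAL is supported here
        data_source_format=DataSourceFormat.DELTA,  # only DELTA is supported here
        storage_location="s3://my-bucket/path/to/events",  # placeholder external location
        columns=[
            ColumnInfo(name="id", type_name=ColumnTypeName.LONG,
                       type_text="bigint", position=0)  # illustrative column spec
        ],
    )
    print(table.full_name)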
The caller must satisfy one of the following requirements: * Be a metastore admin * Be the owner of the parent catalog * Be the - owner of the parent schema and have the USE_CATALOG privilege on the parent catalog * Have the + owner of the parent schema and have the **USE_CATALOG** privilege on the parent catalog * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, - and either be the table owner or have the SELECT privilege on the table. * Have BROWSE privilege on - the parent catalog * Have BROWSE privilege on the parent schema. + and either be the table owner or have the **SELECT** privilege on the table. * Have **BROWSE** + privilege on the parent catalog * Have **BROWSE** privilege on the parent schema :param full_name: str Full name of the table. @@ -83,9 +122,9 @@ Gets a table from the metastore for a specific catalog and schema. The caller must satisfy one of the following requirements: * Be a metastore admin * Be the owner of the parent catalog * Be the owner of - the parent schema and have the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** - privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, and either be - the table owner or have the SELECT privilege on the table. + the parent schema and have the **USE_CATALOG** privilege on the parent catalog * Have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, + and either be the table owner or have the **SELECT** privilege on the table. :param full_name: str Full name of the table. diff --git a/docs/workspace/catalog/temporary_path_credentials.rst b/docs/workspace/catalog/temporary_path_credentials.rst new file mode 100644 index 000000000..6694f39f8 --- /dev/null +++ b/docs/workspace/catalog/temporary_path_credentials.rst @@ -0,0 +1,51 @@ +``w.temporary_path_credentials``: Temporary Path Credentials +============================================================ +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: TemporaryPathCredentialsAPI + + Temporary Path Credentials refer to short-lived, downscoped credentials used to access external cloud + storage locations registered in Databricks. These credentials are employed to provide secure and + time-limited access to data in cloud environments such as AWS, Azure, and Google Cloud. Each cloud + provider has its own type of credentials: AWS uses temporary session tokens via AWS Security Token Service + (STS), Azure utilizes Shared Access Signatures (SAS) for its data storage services, and Google Cloud + supports temporary credentials through OAuth 2.0. + + Temporary path credentials ensure that data access is limited in scope and duration, reducing the risk of + unauthorized access or misuse. To use the temporary path credentials API, a metastore admin needs to + enable the external_access_enabled flag (off by default) at the metastore level. A user needs to be + granted the EXTERNAL USE LOCATION permission by external location owner. For requests on existing external + tables, user also needs to be granted the EXTERNAL USE SCHEMA permission at the schema level by catalog + admin. + + Note that EXTERNAL USE SCHEMA is a schema level permission that can only be granted by catalog admin + explicitly and is not included in schema ownership or ALL PRIVILEGES on the schema for security reasons. 
+ Similarly, EXTERNAL USE LOCATION is an external location level permission that can only be granted by + external location owner explicitly and is not included in external location ownership or ALL PRIVILEGES on + the external location for security reasons. + + This API only supports temporary path credentials for external locations and external tables, and volumes + will be supported in the future. + + .. py:method:: generate_temporary_path_credentials(url: str, operation: PathOperation [, dry_run: Optional[bool]]) -> GenerateTemporaryPathCredentialResponse + + Get a short-lived credential for directly accessing cloud storage locations registered in Databricks. + The Generate Temporary Path Credentials API is only supported for external storage paths, specifically + external locations and external tables. Managed tables are not supported by this API. The metastore + must have **external_access_enabled** flag set to true (default false). The caller must have the + **EXTERNAL_USE_LOCATION** privilege on the external location; this privilege can only be granted by + external location owners. For requests on existing external tables, the caller must also have the + **EXTERNAL_USE_SCHEMA** privilege on the parent schema; this privilege can only be granted by catalog + owners. + + :param url: str + URL for path-based access. + :param operation: :class:`PathOperation` + The operation being performed on the path. + :param dry_run: bool (optional) + Optional. When set to true, the service will not validate that the generated credentials can perform + write operations, therefore no new paths will be created and the response will not contain valid + credentials. Defaults to false. + + :returns: :class:`GenerateTemporaryPathCredentialResponse` + \ No newline at end of file diff --git a/docs/workspace/catalog/temporary_table_credentials.rst b/docs/workspace/catalog/temporary_table_credentials.rst index ee3cc8907..54ccd79b0 100644 --- a/docs/workspace/catalog/temporary_table_credentials.rst +++ b/docs/workspace/catalog/temporary_table_credentials.rst @@ -5,24 +5,25 @@ .. py:class:: TemporaryTableCredentialsAPI Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage - locationswhere table data is stored in Databricks. These credentials are employed to provide secure and - time-limitedaccess to data in cloud environments such as AWS, Azure, and Google Cloud. Each cloud provider - has its own typeof credentials: AWS uses temporary session tokens via AWS Security Token Service (STS), - Azure utilizesShared Access Signatures (SAS) for its data storage services, and Google Cloud supports - temporary credentialsthrough OAuth 2.0.Temporary table credentials ensure that data access is limited in - scope and duration, reducing the risk ofunauthorized access or misuse. To use the temporary table - credentials API, a metastore admin needs to enable the external_access_enabled flag (off by default) at - the metastore level, and user needs to be granted the EXTERNAL USE SCHEMA permission at the schema level - by catalog admin. Note that EXTERNAL USE SCHEMA is a schema level permission that can only be granted by - catalog admin explicitly and is not included in schema ownership or ALL PRIVILEGES on the schema for - security reason. + locations where table data is stored in Databricks. These credentials are employed to provide secure and + time-limited access to data in cloud environments such as AWS, Azure, and Google Cloud. 
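A short sketch of the path credential call documented above. The URL is a placeholder path under a registered external location, and the `PathOperation` member name used here is an assumption rather than a confirmed enum value.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.catalog import PathOperation

    w = WorkspaceClient()

    # Request short-lived, read-scoped credentials for a registered external location path.
    creds = w.temporary_path_credentials.generate_temporary_path_credentials(
        url="s3://my-bucket/external-location/path",  # placeholder
        operation=PathOperation.PATH_READ,            # assumed enum member name
    )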
Each cloud + provider has its own type of credentials: AWS uses temporary session tokens via AWS Security Token Service + (STS), Azure utilizes Shared Access Signatures (SAS) for its data storage services, and Google Cloud + supports temporary credentials through OAuth 2.0. + + Temporary table credentials ensure that data access is limited in scope and duration, reducing the risk of + unauthorized access or misuse. To use the temporary table credentials API, a metastore admin needs to + enable the external_access_enabled flag (off by default) at the metastore level, and user needs to be + granted the EXTERNAL USE SCHEMA permission at the schema level by catalog admin. Note that EXTERNAL USE + SCHEMA is a schema level permission that can only be granted by catalog admin explicitly and is not + included in schema ownership or ALL PRIVILEGES on the schema for security reasons. .. py:method:: generate_temporary_table_credentials( [, operation: Optional[TableOperation], table_id: Optional[str]]) -> GenerateTemporaryTableCredentialResponse Get a short-lived credential for directly accessing the table data on cloud storage. The metastore - must have external_access_enabled flag set to true (default false). The caller must have - EXTERNAL_USE_SCHEMA privilege on the parent schema and this privilege can only be granted by catalog - owners. + must have **external_access_enabled** flag set to true (default false). The caller must have the + **EXTERNAL_USE_SCHEMA** privilege on the parent schema and this privilege can only be granted by + catalog owners. :param operation: :class:`TableOperation` (optional) The operation performed against the table data, either READ or READ_WRITE. If READ_WRITE is diff --git a/docs/workspace/cleanrooms/clean_room_assets.rst b/docs/workspace/cleanrooms/clean_room_assets.rst index af7be1f3a..90e136cd7 100644 --- a/docs/workspace/cleanrooms/clean_room_assets.rst +++ b/docs/workspace/cleanrooms/clean_room_assets.rst @@ -22,17 +22,17 @@ :returns: :class:`CleanRoomAsset` - .. py:method:: create_clean_room_asset_review(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str, notebook_review: NotebookVersionReview) -> CreateCleanRoomAssetReviewResponse + .. py:method:: create_clean_room_asset_review(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str [, notebook_review: Optional[NotebookVersionReview]]) -> CreateCleanRoomAssetReviewResponse - submit an asset review + Submit an asset review :param clean_room_name: str Name of the clean room :param asset_type: :class:`CleanRoomAssetAssetType` - can only be NOTEBOOK_FILE for now + Asset type. Can either be NOTEBOOK_FILE or JAR_ANALYSIS. :param name: str Name of the asset - :param notebook_review: :class:`NotebookVersionReview` + :param notebook_review: :class:`NotebookVersionReview` (optional) :returns: :class:`CreateCleanRoomAssetReviewResponse` @@ -93,7 +93,8 @@ For UC securable assets (tables, volumes, etc.), the format is *shared_catalog*.*shared_schema*.*asset_name* - For notebooks, the name is the notebook file name. + For notebooks, the name is the notebook file name. For jar analyses, the name is the jar analysis + name. :param asset: :class:`CleanRoomAsset` The asset to update. The asset's `name` and `asset_type` fields are used to identify the asset to update. diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index 86e243930..c0d16b9f8 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -41,6 +41,20 @@ + .. 
py:method:: delete_conversation_message(space_id: str, conversation_id: str, message_id: str) + + Delete a conversation message. + + :param space_id: str + The ID associated with the Genie space where the message is located. + :param conversation_id: str + The ID associated with the conversation. + :param message_id: str + The ID associated with the message to delete. + + + + .. py:method:: execute_message_attachment_query(space_id: str, conversation_id: str, message_id: str, attachment_id: str) -> GenieGetMessageQueryResultResponse Execute the SQL for a message query attachment. Use this API when the query attachment has expired and @@ -60,7 +74,8 @@ .. py:method:: execute_message_query(space_id: str, conversation_id: str, message_id: str) -> GenieGetMessageQueryResultResponse - Execute the SQL query in the message. + DEPRECATED: Use [Execute Message Attachment Query](:method:genie/executemessageattachmentquery) + instead. :param space_id: str Genie space ID @@ -105,8 +120,8 @@ .. py:method:: get_message_query_result(space_id: str, conversation_id: str, message_id: str) -> GenieGetMessageQueryResultResponse - Get the result of SQL query if the message has a query attachment. This is only available if a message - has a query attachment and the message status is `EXECUTING_QUERY`. + DEPRECATED: Use [Get Message Attachment Query Result](:method:genie/getmessageattachmentqueryresult) + instead. :param space_id: str Genie space ID @@ -120,8 +135,8 @@ .. py:method:: get_message_query_result_by_attachment(space_id: str, conversation_id: str, message_id: str, attachment_id: str) -> GenieGetMessageQueryResultResponse - Get the result of SQL query if the message has a query attachment. This is only available if a message - has a query attachment and the message status is `EXECUTING_QUERY` OR `COMPLETED`. + DEPRECATED: Use [Get Message Attachment Query Result](:method:genie/getmessageattachmentqueryresult) + instead. :param space_id: str Genie space ID @@ -145,12 +160,31 @@ :returns: :class:`GenieSpace` - .. py:method:: list_conversations(space_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> GenieListConversationsResponse + .. py:method:: list_conversation_messages(space_id: str, conversation_id: str [, page_size: Optional[int], page_token: Optional[str]]) -> GenieListConversationMessagesResponse + + List messages in a conversation + + :param space_id: str + The ID associated with the Genie space where the conversation is located + :param conversation_id: str + The ID of the conversation to list messages from + :param page_size: int (optional) + Maximum number of messages to return per page + :param page_token: str (optional) + Token to get the next page of results + + :returns: :class:`GenieListConversationMessagesResponse` + + + .. py:method:: list_conversations(space_id: str [, include_all: Optional[bool], page_size: Optional[int], page_token: Optional[str]]) -> GenieListConversationsResponse Get a list of conversations in a Genie Space. :param space_id: str The ID of the Genie space to retrieve conversations from. + :param include_all: bool (optional) + Include all conversations in the space across all users. Requires "Can Manage" permission on the + space. :param page_size: int (optional) Maximum number of conversations to return per page :param page_token: str (optional) @@ -171,6 +205,24 @@ :returns: :class:`GenieListSpacesResponse` + .. 
py:method:: send_message_feedback(space_id: str, conversation_id: str, message_id: str, rating: GenieFeedbackRating [, comment: Optional[str]]) + + Send feedback for a message. + + :param space_id: str + The ID associated with the Genie space where the message is located. + :param conversation_id: str + The ID associated with the conversation. + :param message_id: str + The ID associated with the message to provide feedback for. + :param rating: :class:`GenieFeedbackRating` + The rating (POSITIVE, NEGATIVE, or NONE). + :param comment: str (optional) + Optional text feedback that will be stored as a comment. + + + + .. py:method:: start_conversation(space_id: str, content: str) -> Wait[GenieMessage] Start a new conversation. diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst index ee7a3d4e1..36e594ec7 100644 --- a/docs/workspace/database/database.rst +++ b/docs/workspace/database/database.rst @@ -190,9 +190,25 @@ :returns: :class:`SyncedDatabaseTable` + .. py:method:: list_database_catalogs(instance_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseCatalog] + + This API is currently unimplemented, but exposed for Terraform support. + + :param instance_name: str + Name of the instance to get database catalogs for. + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of synced database tables. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseCatalog` + + .. py:method:: list_database_instance_roles(instance_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseInstanceRole] - START OF PG ROLE APIs Section + START OF PG ROLE APIs Section These APIs are marked a PUBLIC with stage < PUBLIC_PREVIEW. With more + recent Lakebase V2 plans, we don't plan to ever advance these to PUBLIC_PREVIEW. These APIs will + remain effectively undocumented/UI-only and we'll aim for a new public roles API as part of V2 PuPr. :param instance_name: str :param page_size: int (optional) @@ -215,6 +231,34 @@ :returns: Iterator over :class:`DatabaseInstance` + .. py:method:: list_synced_database_tables(instance_name: str [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SyncedDatabaseTable] + + This API is currently unimplemented, but exposed for Terraform support. + + :param instance_name: str + Name of the instance to get synced tables for. + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of synced database tables. Requests first page if absent. + + :returns: Iterator over :class:`SyncedDatabaseTable` + + + .. py:method:: update_database_catalog(name: str, database_catalog: DatabaseCatalog, update_mask: str) -> DatabaseCatalog + + This API is currently unimplemented, but exposed for Terraform support. + + :param name: str + The name of the catalog in UC. + :param database_catalog: :class:`DatabaseCatalog` + Note that updating a database catalog is not yet supported. + :param update_mask: str + The list of fields to update. Setting this field is not yet supported. + + :returns: :class:`DatabaseCatalog` + + .. py:method:: update_database_instance(name: str, database_instance: DatabaseInstance, update_mask: str) -> DatabaseInstance Update a Database Instance. @@ -223,9 +267,24 @@ The name of the instance. This is the unique identifier for the instance. 
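Returning to the Genie feedback method documented earlier in this section, which now takes a required `rating` plus an optional `comment`, a minimal sketch with placeholder IDs follows; the rating values match the documented POSITIVE/NEGATIVE/NONE set.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.dashboards import GenieFeedbackRating

    w = WorkspaceClient()

    # Rate a Genie message and attach an optional free-form comment.
    w.genie.send_message_feedback(
        space_id="01ef0123456789abcdef",  # placeholder Genie space ID
        conversation_id="conv-123",       # placeholder
        message_id="msg-456",             # placeholder
        rating=GenieFeedbackRating.POSITIVE,
        comment="Correct answer, helpful SQL.",
    )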
:param database_instance: :class:`DatabaseInstance` :param update_mask: str - The list of fields to update. This field is not yet supported, and is ignored by the server. + The list of fields to update. If unspecified, all fields will be updated when possible. To wipe out + custom_tags, specify custom_tags in the update_mask with an empty custom_tags map. :returns: :class:`DatabaseInstance` + .. py:method:: update_synced_database_table(name: str, synced_table: SyncedDatabaseTable, update_mask: str) -> SyncedDatabaseTable + + This API is currently unimplemented, but exposed for Terraform support. + + :param name: str + Full three-part (catalog, schema, table) name of the table. + :param synced_table: :class:`SyncedDatabaseTable` + Note that updating a synced database table is not yet supported. + :param update_mask: str + The list of fields to update. Setting this field is not yet supported. + + :returns: :class:`SyncedDatabaseTable` + + .. py:method:: wait_get_database_instance_database_available(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[DatabaseInstance], None]]) -> DatabaseInstance diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index bc5ca6afb..ec174a770 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -24,7 +24,9 @@ These APIs are available from WorkspaceClient qualitymonitorv2/index serving/index settings/index + settingsv2/index sharing/index sql/index + tags/index vectorsearch/index workspace/index \ No newline at end of file diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index d68e92a5c..e1d8f668f 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -126,7 +126,7 @@ .. py:method:: cancel_run_and_wait(run_id: int, timeout: datetime.timedelta = 0:20:00) -> Run - .. py:method:: create( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], performance_target: Optional[PerformanceTarget], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse + .. 
py:method:: create( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], continuous: Optional[Continuous], deployment: Optional[JobDeployment], description: Optional[str], edit_mode: Optional[JobEditMode], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], format: Optional[Format], git_source: Optional[GitSource], health: Optional[JobsHealthRules], job_clusters: Optional[List[JobCluster]], max_concurrent_runs: Optional[int], name: Optional[str], notification_settings: Optional[JobNotificationSettings], parameters: Optional[List[JobParameterDefinition]], performance_target: Optional[PerformanceTarget], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], schedule: Optional[CronSchedule], tags: Optional[Dict[str, str]], tasks: Optional[List[Task]], timeout_seconds: Optional[int], trigger: Optional[TriggerSettings], usage_policy_id: Optional[str], webhook_notifications: Optional[WebhookNotifications]]) -> CreateResponse Usage: @@ -255,6 +255,10 @@ A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`. + :param usage_policy_id: str (optional) + The id of the user specified usage policy to use for this job. If not specified, a default usage + policy may be applied when creating or modifying the job. See `effective_usage_policy_id` for the + usage policy used by this workload. :param webhook_notifications: :class:`WebhookNotifications` (optional) A collection of system notification IDs to notify when runs of this job begin or complete. @@ -1012,7 +1016,7 @@ :returns: :class:`JobPermissions` - .. py:method:: submit( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications]]) -> Wait[Run] + .. py:method:: submit( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], usage_policy_id: Optional[str], webhook_notifications: Optional[WebhookNotifications]]) -> Wait[Run] Usage: @@ -1096,6 +1100,9 @@ :param tasks: List[:class:`SubmitTask`] (optional) :param timeout_seconds: int (optional) An optional timeout applied to each run of this job. A value of `0` means no timeout. + :param usage_policy_id: str (optional) + The user specified id of the usage policy to use for this one-time run. If not specified, a default + usage policy may be applied when creating or modifying the job. 
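To show the new `usage_policy_id` argument on one-time runs documented above, a hedged sketch of submitting a single notebook task; the notebook path, cluster ID and policy ID are placeholders, and in practice the compute spec would match your workspace.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import jobs

    w = WorkspaceClient()

    run = w.jobs.submit(
        run_name="nightly-adhoc-run",
        usage_policy_id="1234-abcd",  # placeholder usage policy ID
        tasks=[
            jobs.SubmitTask(
                task_key="ingest",
                existing_cluster_id="0900-000000-abcdefgh",  # placeholder cluster ID
                notebook_task=jobs.NotebookTask(notebook_path="/Users/me@example.com/ingest"),
            )
        ],
    ).result()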
:param webhook_notifications: :class:`WebhookNotifications` (optional) A collection of system notification IDs to notify when the run begins or completes. @@ -1104,7 +1111,7 @@ See :method:wait_get_run_job_terminated_or_skipped for more details. - .. py:method:: submit_and_wait( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], webhook_notifications: Optional[WebhookNotifications], timeout: datetime.timedelta = 0:20:00]) -> Run + .. py:method:: submit_and_wait( [, access_control_list: Optional[List[JobAccessControlRequest]], budget_policy_id: Optional[str], email_notifications: Optional[JobEmailNotifications], environments: Optional[List[JobEnvironment]], git_source: Optional[GitSource], health: Optional[JobsHealthRules], idempotency_token: Optional[str], notification_settings: Optional[JobNotificationSettings], queue: Optional[QueueSettings], run_as: Optional[JobRunAs], run_name: Optional[str], tasks: Optional[List[SubmitTask]], timeout_seconds: Optional[int], usage_policy_id: Optional[str], webhook_notifications: Optional[WebhookNotifications], timeout: datetime.timedelta = 0:20:00]) -> Run .. py:method:: update(job_id: int [, fields_to_remove: Optional[List[str]], new_settings: Optional[JobSettings]]) diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index 3939b44e0..c96f3fbbe 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -122,6 +122,40 @@ .. py:method:: get_open_ai_client() + Create an OpenAI client configured for Databricks Model Serving. + + Returns an OpenAI client instance that is pre-configured to send requests to + Databricks Model Serving endpoints. The client uses Databricks authentication + to query endpoints within the workspace associated with the current WorkspaceClient + instance. + + Args: + **kwargs: Additional parameters to pass to the OpenAI client constructor. + Common parameters include: + - timeout (float): Request timeout in seconds (e.g., 30.0) + - max_retries (int): Maximum number of retries for failed requests (e.g., 3) + - default_headers (dict): Additional headers to include with requests + - default_query (dict): Additional query parameters to include with requests + + Any parameter accepted by the OpenAI client constructor can be passed here, + except for the following parameters which are reserved for Databricks integration: + base_url, api_key, http_client + + Returns: + OpenAI: An OpenAI client instance configured for Databricks Model Serving. + + Raises: + ImportError: If the OpenAI library is not installed. + ValueError: If any reserved Databricks parameters are provided in kwargs. + + Example: + >>> client = workspace_client.serving_endpoints.get_open_ai_client() + >>> # With custom timeout and retries + >>> client = workspace_client.serving_endpoints.get_open_ai_client( + ... timeout=30.0, + ... max_retries=5 + ... ) + .. py:method:: get_open_api(name: str) -> GetOpenApiResponse @@ -245,12 +279,15 @@ :returns: :class:`PutAiGatewayResponse` - .. 
py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse + .. py:method:: query(name: str [, client_request_id: Optional[str], dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float], usage_context: Optional[Dict[str, str]]]) -> QueryEndpointResponse Query a serving endpoint :param name: str The name of the serving endpoint. This field is required and is provided via the path parameter. + :param client_request_id: str (optional) + Optional user-provided request identifier that will be recorded in the inference table and the usage + tracking table. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -292,6 +329,8 @@ The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is a float between 0.0 and 2.0 with a default of 1.0 and should only be used with other chat/completions query fields. + :param usage_context: Dict[str,str] (optional) + Optional user-provided context that will be recorded in the usage tracking table. :returns: :class:`QueryEndpointResponse` diff --git a/docs/workspace/serving/serving_endpoints_data_plane.rst b/docs/workspace/serving/serving_endpoints_data_plane.rst index 9177cb295..028231da3 100644 --- a/docs/workspace/serving/serving_endpoints_data_plane.rst +++ b/docs/workspace/serving/serving_endpoints_data_plane.rst @@ -7,12 +7,15 @@ Serving endpoints DataPlane provides a set of operations to interact with data plane endpoints for Serving endpoints service. - .. py:method:: query(name: str [, dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float]]) -> QueryEndpointResponse + .. py:method:: query(name: str [, client_request_id: Optional[str], dataframe_records: Optional[List[Any]], dataframe_split: Optional[DataframeSplitInput], extra_params: Optional[Dict[str, str]], input: Optional[Any], inputs: Optional[Any], instances: Optional[List[Any]], max_tokens: Optional[int], messages: Optional[List[ChatMessage]], n: Optional[int], prompt: Optional[Any], stop: Optional[List[str]], stream: Optional[bool], temperature: Optional[float], usage_context: Optional[Dict[str, str]]]) -> QueryEndpointResponse Query a serving endpoint :param name: str The name of the serving endpoint. This field is required and is provided via the path parameter. 
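Both `query()` methods accept the new fields. A minimal sketch against a chat-style endpoint follows, assuming a configured `WorkspaceClient`; the endpoint name, request id, and usage-context keys are chosen purely for illustration.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service.serving import ChatMessage, ChatMessageRole

    w = WorkspaceClient()

    response = w.serving_endpoints.query(
        name="my-chat-endpoint",  # hypothetical endpoint name
        messages=[ChatMessage(role=ChatMessageRole.USER, content="What is Databricks?")],
        max_tokens=128,
        # Recorded in the inference table and the usage tracking table.
        client_request_id="req-2025-09-02-001",
        # Recorded in the usage tracking table.
        usage_context={"team": "analytics", "workflow": "nightly-report"},
    )
    print(response.choices[0].message.content)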
+ :param client_request_id: str (optional) + Optional user-provided request identifier that will be recorded in the inference table and the usage + tracking table. :param dataframe_records: List[Any] (optional) Pandas Dataframe input in the records orientation. :param dataframe_split: :class:`DataframeSplitInput` (optional) @@ -54,6 +57,8 @@ The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. This is a float between 0.0 and 2.0 with a default of 1.0 and should only be used with other chat/completions query fields. + :param usage_context: Dict[str,str] (optional) + Optional user-provided context that will be recorded in the usage tracking table. :returns: :class:`QueryEndpointResponse` \ No newline at end of file diff --git a/docs/workspace/settingsv2/index.rst b/docs/workspace/settingsv2/index.rst new file mode 100644 index 000000000..479687d28 --- /dev/null +++ b/docs/workspace/settingsv2/index.rst @@ -0,0 +1,10 @@ + +SettingsV2 +========== + +Manage admin settings + +.. toctree:: + :maxdepth: 1 + + workspace_settings_v2 \ No newline at end of file diff --git a/docs/workspace/settingsv2/workspace_settings_v2.rst b/docs/workspace/settingsv2/workspace_settings_v2.rst new file mode 100644 index 000000000..da8557baf --- /dev/null +++ b/docs/workspace/settingsv2/workspace_settings_v2.rst @@ -0,0 +1,46 @@ +``w.workspace_settings_v2``: WorkspaceSettings.v2 +================================================= +.. currentmodule:: databricks.sdk.service.settingsv2 + +.. py:class:: WorkspaceSettingsV2API + + APIs to manage workspace level settings + + .. py:method:: get_public_workspace_setting(name: str) -> Setting + + Get a setting value at workspace level + + :param name: str + + :returns: :class:`Setting` + + + .. py:method:: list_workspace_settings_metadata( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[SettingsMetadata] + + List valid setting keys and metadata. These settings are available to referenced via [GET + /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~1getpublicworkspacesetting) and [PATCH + /api/2.1/settings/{name}](#~1api~1workspace~1settingsv2~patchpublicworkspacesetting) APIs + + :param page_size: int (optional) + The maximum number of settings to return. The service may return fewer than this value. If + unspecified, at most 200 settings will be returned. The maximum value is 1000; values above 1000 + will be coerced to 1000. + :param page_token: str (optional) + A page token, received from a previous `ListWorkspaceSettingsMetadataRequest` call. Provide this to + retrieve the subsequent page. + + When paginating, all other parameters provided to `ListWorkspaceSettingsMetadataRequest` must match + the call that provided the page token. + + :returns: Iterator over :class:`SettingsMetadata` + + + .. py:method:: patch_public_workspace_setting(name: str, setting: Setting) -> Setting + + Patch a setting value at workspace level + + :param name: str + :param setting: :class:`Setting` + + :returns: :class:`Setting` + \ No newline at end of file diff --git a/docs/workspace/tags/index.rst b/docs/workspace/tags/index.rst new file mode 100644 index 000000000..5489c384d --- /dev/null +++ b/docs/workspace/tags/index.rst @@ -0,0 +1,10 @@ + +Tags +==== + +Manage tag policies and tag assignments on workspace objects + +.. 
toctree:: + :maxdepth: 1 + + tag_policies \ No newline at end of file diff --git a/docs/workspace/tags/tag_policies.rst b/docs/workspace/tags/tag_policies.rst new file mode 100644 index 000000000..2be7f5360 --- /dev/null +++ b/docs/workspace/tags/tag_policies.rst @@ -0,0 +1,68 @@ +``w.tag_policies``: Tag Policies +================================ +.. currentmodule:: databricks.sdk.service.tags + +.. py:class:: TagPoliciesAPI + + The Tag Policy API allows you to manage tag policies in Databricks. + + .. py:method:: create_tag_policy(tag_policy: TagPolicy) -> TagPolicy + + Creates a new tag policy. + + :param tag_policy: :class:`TagPolicy` + + :returns: :class:`TagPolicy` + + + .. py:method:: delete_tag_policy(tag_key: str) + + Deletes a tag policy by its key. + + :param tag_key: str + + + + + .. py:method:: get_tag_policy(tag_key: str) -> TagPolicy + + Gets a single tag policy by its key. + + :param tag_key: str + + :returns: :class:`TagPolicy` + + + .. py:method:: list_tag_policies( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[TagPolicy] + + Lists all tag policies in the account. + + :param page_size: int (optional) + The maximum number of results to return in this request. Fewer results may be returned than + requested. If unspecified or set to 0, this defaults to 1000. The maximum value is 1000; values + above 1000 will be coerced down to 1000. + :param page_token: str (optional) + An optional page token received from a previous list tag policies call. + + :returns: Iterator over :class:`TagPolicy` + + + .. py:method:: update_tag_policy(tag_key: str, tag_policy: TagPolicy, update_mask: str) -> TagPolicy + + Updates an existing tag policy. + + :param tag_key: str + :param tag_policy: :class:`TagPolicy` + :param update_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`TagPolicy` + \ No newline at end of file diff --git a/docs/workspace/vectorsearch/vector_search_endpoints.rst b/docs/workspace/vectorsearch/vector_search_endpoints.rst index 510bc6868..47a8fa59a 100644 --- a/docs/workspace/vectorsearch/vector_search_endpoints.rst +++ b/docs/workspace/vectorsearch/vector_search_endpoints.rst @@ -62,7 +62,8 @@ :param endpoint_name: str Name of the vector search endpoint :param budget_policy_id: str - The budget policy id to be applied + The budget policy id to be applied (hima-sheth) TODO: remove this once we've migrated to usage + policies :returns: :class:`PatchEndpointBudgetPolicyResponse` diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst index ec8efd3c1..11417c9da 100644 --- a/docs/workspace/vectorsearch/vector_search_indexes.rst +++ b/docs/workspace/vectorsearch/vector_search_indexes.rst @@ -53,12 +53,16 @@ - .. py:method:: get_index(index_name: str) -> VectorIndex + .. py:method:: get_index(index_name: str [, ensure_reranker_compatible: Optional[bool]]) -> VectorIndex Get an index. 
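A small sketch of requesting a reranker-compatible index URL with the new flag, assuming a configured `WorkspaceClient`; the index name is a placeholder.

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Guarantee the returned URL works with the reranker (placeholder index name).
    index = w.vector_search_indexes.get_index(
        index_name="main.default.docs_index",
        ensure_reranker_compatible=True,
    )
    print(index.name)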
:param index_name: str Name of the index + :param ensure_reranker_compatible: bool (optional) + If true, the URL returned for the index is guaranteed to be compatible with the reranker. Currently + this means we return the CP URL regardless of how the index is being accessed. If not set or set to + false, the URL may still be compatible with the reranker depending on what URL we return. :returns: :class:`VectorIndex` @@ -75,7 +79,7 @@ :returns: Iterator over :class:`MiniVectorIndex` - .. py:method:: query_index(index_name: str, columns: List[str] [, columns_to_rerank: Optional[List[str]], filters_json: Optional[str], num_results: Optional[int], query_text: Optional[str], query_type: Optional[str], query_vector: Optional[List[float]], score_threshold: Optional[float]]) -> QueryVectorIndexResponse + .. py:method:: query_index(index_name: str, columns: List[str] [, columns_to_rerank: Optional[List[str]], filters_json: Optional[str], num_results: Optional[int], query_text: Optional[str], query_type: Optional[str], query_vector: Optional[List[float]], reranker: Optional[RerankerConfig], score_threshold: Optional[float]]) -> QueryVectorIndexResponse Query the specified vector index. @@ -102,6 +106,7 @@ :param query_vector: List[float] (optional) Query vector. Required for Direct Vector Access Index and Delta Sync Index using self-managed vectors. + :param reranker: :class:`RerankerConfig` (optional) :param score_threshold: float (optional) Threshold for the approximate nearest neighbor search. Defaults to 0.0.
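And a sketch of a text query that reranks results, assuming a configured `WorkspaceClient`; the index name, columns, and query text are placeholders. The new `reranker` parameter would additionally take a :class:`RerankerConfig`, but its fields are not shown in this patch excerpt, so the sketch sticks to `columns_to_rerank`.

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Query a vector index and rerank on the listed columns.
    # A RerankerConfig could also be passed via the new ``reranker``
    # parameter; it is omitted here because its fields are not documented
    # in this excerpt.
    results = w.vector_search_indexes.query_index(
        index_name="main.default.docs_index",
        columns=["id", "text"],
        columns_to_rerank=["text"],
        query_text="How do I create a serving endpoint?",
        num_results=5,
    )
    print(results.result.row_count if results.result else 0)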