diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 19009050a..19086887a 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -59c4c0f3d5f0ef00cd5350b5674e941a7606d91a \ No newline at end of file +a1c8a01392c7cfef8becc9e9e3eb8236cdcbdfbd \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 9c6c9cf3f..66caaad70 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,25 @@ ### Internal Changes ### API Changes +* Add `CreateSpace` and `UpdateSpace` methods for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Add `BatchCreateMaterializedFeatures` method for [w.FeatureEngineering](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#FeatureEngineeringAPI) workspace-level service. +* Add `RetrieveUserVisibleMetrics` method for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. +* Add `IncludeSerializedSpace` field for [dashboards.GenieGetSpaceRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGetSpaceRequest). +* Add `Purpose` field for [dashboards.TextAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#TextAttachment). +* Add `BudgetPolicyId` field for [database.NewPipelineSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/database#NewPipelineSpec). +* Add `LineageContext` field for [ml.Feature](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Feature). +* Add `ConnectionParameters` field for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition). +* Add `IngestFromUcForeignCatalog` field for [pipelines.IngestionPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionPipelineDefinition). +* Add `RewindSpec` field for [pipelines.StartUpdate](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#StartUpdate). +* Add `TypeText` field for [vectorsearch.ColumnInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ColumnInfo). +* Add `AutoscaleV2` enum value for [compute.EventDetailsCause](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EventDetailsCause). +* Add `UnsupportedConversationTypeException` enum value for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). +* Add `ForeignCatalog` enum value for [pipelines.IngestionSourceType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionSourceType). +* Add `Creating` and `CreateFailed` enum values for [settings.CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState). +* Add `Creating` and `CreateFailed` enum values for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). 
+* Add `RedState` and `YellowState` enum values for [vectorsearch.EndpointStatusState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointStatusState). +* Change `Destinations` field for [catalog.AccessRequestDestinations](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#AccessRequestDestinations) to no longer be required. +* [Breaking] Change `TableNames` field for [jobs.TableUpdateTriggerConfiguration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TableUpdateTriggerConfiguration) to be required. +* [Breaking] Change `OnlineStoreConfig` field for [ml.MaterializedFeature](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#MaterializedFeature) to type [ml.OnlineStoreConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#OnlineStoreConfig). \ No newline at end of file diff --git a/experimental/mocks/mock_account_client.go b/experimental/mocks/mock_account_client.go index 347e94b5c..6417fe476 100755 --- a/experimental/mocks/mock_account_client.go +++ b/experimental/mocks/mock_account_client.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/databricks/databricks-sdk-go" - "github.com/stretchr/testify/mock" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/billing" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/catalog" @@ -16,6 +15,7 @@ import ( "github.com/databricks/databricks-sdk-go/experimental/mocks/service/provisioning" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/settings" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/settingsv2" + "github.com/stretchr/testify/mock" ) type MockAccountClient struct { diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index c56df3da5..fb6efa051 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/databricks/databricks-sdk-go" - "github.com/stretchr/testify/mock" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/agentbricks" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/apps" @@ -33,6 +32,7 @@ import ( "github.com/databricks/databricks-sdk-go/experimental/mocks/service/tags" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/vectorsearch" "github.com/databricks/databricks-sdk-go/experimental/mocks/service/workspace" + "github.com/stretchr/testify/mock" ) type MockWorkspaceClient struct { diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index 513cfb0a6..0d8b07228 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -159,6 +159,65 @@ func (_c *MockGenieInterface_CreateMessageAndWait_Call) RunAndReturn(run func(co return _c } +// CreateSpace provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) CreateSpace(ctx context.Context, request 
dashboards.GenieCreateSpaceRequest) (*dashboards.GenieSpace, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for CreateSpace") + } + + var r0 *dashboards.GenieSpace + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieCreateSpaceRequest) (*dashboards.GenieSpace, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieCreateSpaceRequest) *dashboards.GenieSpace); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieSpace) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieCreateSpaceRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_CreateSpace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateSpace' +type MockGenieInterface_CreateSpace_Call struct { + *mock.Call +} + +// CreateSpace is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieCreateSpaceRequest +func (_e *MockGenieInterface_Expecter) CreateSpace(ctx interface{}, request interface{}) *MockGenieInterface_CreateSpace_Call { + return &MockGenieInterface_CreateSpace_Call{Call: _e.mock.On("CreateSpace", ctx, request)} +} + +func (_c *MockGenieInterface_CreateSpace_Call) Run(run func(ctx context.Context, request dashboards.GenieCreateSpaceRequest)) *MockGenieInterface_CreateSpace_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieCreateSpaceRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_CreateSpace_Call) Return(_a0 *dashboards.GenieSpace, _a1 error) *MockGenieInterface_CreateSpace_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_CreateSpace_Call) RunAndReturn(run func(context.Context, dashboards.GenieCreateSpaceRequest) (*dashboards.GenieSpace, error)) *MockGenieInterface_CreateSpace_Call { + _c.Call.Return(run) + return _c +} + // DeleteConversation provides a mock function with given fields: ctx, request func (_m *MockGenieInterface) DeleteConversation(ctx context.Context, request dashboards.GenieDeleteConversationRequest) error { ret := _m.Called(ctx, request) @@ -1529,6 +1588,65 @@ func (_c *MockGenieInterface_TrashSpaceBySpaceId_Call) RunAndReturn(run func(con return _c } +// UpdateSpace provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) UpdateSpace(ctx context.Context, request dashboards.GenieUpdateSpaceRequest) (*dashboards.GenieSpace, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateSpace") + } + + var r0 *dashboards.GenieSpace + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieUpdateSpaceRequest) (*dashboards.GenieSpace, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieUpdateSpaceRequest) *dashboards.GenieSpace); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieSpace) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieUpdateSpaceRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_UpdateSpace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateSpace' +type 
MockGenieInterface_UpdateSpace_Call struct { + *mock.Call +} + +// UpdateSpace is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieUpdateSpaceRequest +func (_e *MockGenieInterface_Expecter) UpdateSpace(ctx interface{}, request interface{}) *MockGenieInterface_UpdateSpace_Call { + return &MockGenieInterface_UpdateSpace_Call{Call: _e.mock.On("UpdateSpace", ctx, request)} +} + +func (_c *MockGenieInterface_UpdateSpace_Call) Run(run func(ctx context.Context, request dashboards.GenieUpdateSpaceRequest)) *MockGenieInterface_UpdateSpace_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieUpdateSpaceRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_UpdateSpace_Call) Return(_a0 *dashboards.GenieSpace, _a1 error) *MockGenieInterface_UpdateSpace_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_UpdateSpace_Call) RunAndReturn(run func(context.Context, dashboards.GenieUpdateSpaceRequest) (*dashboards.GenieSpace, error)) *MockGenieInterface_UpdateSpace_Call { + _c.Call.Return(run) + return _c +} + // WaitGetMessageGenieCompleted provides a mock function with given fields: ctx, conversationId, messageId, spaceId, timeout, callback func (_m *MockGenieInterface) WaitGetMessageGenieCompleted(ctx context.Context, conversationId string, messageId string, spaceId string, timeout time.Duration, callback func(*dashboards.GenieMessage)) (*dashboards.GenieMessage, error) { ret := _m.Called(ctx, conversationId, messageId, spaceId, timeout, callback) diff --git a/experimental/mocks/service/ml/mock_feature_engineering_interface.go b/experimental/mocks/service/ml/mock_feature_engineering_interface.go index 478f94fff..af59cd3df 100644 --- a/experimental/mocks/service/ml/mock_feature_engineering_interface.go +++ b/experimental/mocks/service/ml/mock_feature_engineering_interface.go @@ -24,6 +24,65 @@ func (_m *MockFeatureEngineeringInterface) EXPECT() *MockFeatureEngineeringInter return &MockFeatureEngineeringInterface_Expecter{mock: &_m.Mock} } +// BatchCreateMaterializedFeatures provides a mock function with given fields: ctx, request +func (_m *MockFeatureEngineeringInterface) BatchCreateMaterializedFeatures(ctx context.Context, request ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for BatchCreateMaterializedFeatures") + } + + var r0 *ml.BatchCreateMaterializedFeaturesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) *ml.BatchCreateMaterializedFeaturesResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.BatchCreateMaterializedFeaturesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchCreateMaterializedFeatures' +type MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call struct { + *mock.Call +} + +// 
BatchCreateMaterializedFeatures is a helper method to define mock.On call +// - ctx context.Context +// - request ml.BatchCreateMaterializedFeaturesRequest +func (_e *MockFeatureEngineeringInterface_Expecter) BatchCreateMaterializedFeatures(ctx interface{}, request interface{}) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + return &MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call{Call: _e.mock.On("BatchCreateMaterializedFeatures", ctx, request)} +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) Run(run func(ctx context.Context, request ml.BatchCreateMaterializedFeaturesRequest)) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.BatchCreateMaterializedFeaturesRequest)) + }) + return _c +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) Return(_a0 *ml.BatchCreateMaterializedFeaturesResponse, _a1 error) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) RunAndReturn(run func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error)) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Return(run) + return _c +} + // CreateFeature provides a mock function with given fields: ctx, request func (_m *MockFeatureEngineeringInterface) CreateFeature(ctx context.Context, request ml.CreateFeatureRequest) (*ml.Feature, error) { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go index 9cc2b4d0f..d2197cfe4 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go @@ -481,6 +481,65 @@ func (_c *MockVectorSearchEndpointsInterface_ListEndpointsAll_Call) RunAndReturn return _c } +// RetrieveUserVisibleMetrics provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) RetrieveUserVisibleMetrics(ctx context.Context, request vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for RetrieveUserVisibleMetrics") + } + + var r0 *vectorsearch.RetrieveUserVisibleMetricsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) *vectorsearch.RetrieveUserVisibleMetricsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.RetrieveUserVisibleMetricsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'RetrieveUserVisibleMetrics' +type MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call struct { + *mock.Call +} + +// RetrieveUserVisibleMetrics is a helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.RetrieveUserVisibleMetricsRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) RetrieveUserVisibleMetrics(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + return &MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call{Call: _e.mock.On("RetrieveUserVisibleMetrics", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) Run(run func(ctx context.Context, request vectorsearch.RetrieveUserVisibleMetricsRequest)) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.RetrieveUserVisibleMetricsRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) Return(_a0 *vectorsearch.RetrieveUserVisibleMetricsResponse, _a1 error) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) RunAndReturn(run func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error)) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Return(run) + return _c +} + // UpdateEndpointBudgetPolicy provides a mock function with given fields: ctx, request func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointBudgetPolicy(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error) { ret := _m.Called(ctx, request) diff --git a/internal/generatedtests/json_marshall_test.go b/internal/generatedtests/json_marshall_test.go index 996934e0e..eb0c39618 100755 --- a/internal/generatedtests/json_marshall_test.go +++ b/internal/generatedtests/json_marshall_test.go @@ -204,7 +204,7 @@ func TestJsonMarshall(t *testing.T) { RequiredString: "non_default_string", RequiredStruct: map[string]json.RawMessage{}, RequiredTimestamp: *time.New(timeFromString("2023-12-31T23:59:59Z")), - RequiredValue: json.RawMessage("{}"), + RequiredValue: json.RawMessage("{\"key\": \"value\"}"), TestRequiredEnum: jsonmarshallv2.TestEnumTestEnumTwo, }, want: `{ @@ -212,7 +212,7 @@ func TestJsonMarshall(t *testing.T) { "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, - "required_value": {}, + "required_value": {"key": "value"}, "required_list_value": [], "required_struct": {}, "required_message": {}, diff --git a/internal/testspecs/service/lrotesting/model.go b/internal/testspecs/service/lrotesting/model.go index 805c57808..34ef4fb41 100755 --- a/internal/testspecs/service/lrotesting/model.go +++ b/internal/testspecs/service/lrotesting/model.go @@ -19,12 +19,7 @@ type CreateTestResourceRequest struct { Resource TestResource `json:"resource"` } -// Serialization format for DatabricksServiceException with error details. This -// message doesn't work for ScalaPB-04 as google.protobuf.Any is only available -// to ScalaPB-09. 
Note the definition of this message should be in sync with -// DatabricksServiceExceptionProto defined in -// /api-base/proto/legacy/databricks.proto except the later one doesn't have the -// error details field defined. +// Databricks Error that is returned by all Databricks APIs. type DatabricksServiceExceptionWithDetailsProto struct { // @pbjson-skip Details []json.RawMessage `json:"details,omitempty"` @@ -350,24 +345,13 @@ type Operation struct { Error *DatabricksServiceExceptionWithDetailsProto `json:"error,omitempty"` // Service-specific metadata associated with the operation. It typically // contains progress information and common metadata such as create time. - // Some services might not provide such metadata. Any method that returns a - // long-running operation should document the metadata type, if any. + // Some services might not provide such metadata. Metadata json.RawMessage `json:"metadata,omitempty"` // The server-assigned name, which is only unique within the same service // that originally returns it. If you use the default HTTP mapping, the // `name` should be a resource name ending with `operations/{unique_id}`. - // - // Note: multi-segment resource names are not yet supported in the RPC - // framework and SDK/TF. Until that support is added, `name` must be string - // without internal `/` separators. Name string `json:"name,omitempty"` - // The normal, successful response of the operation. If the original method - // returns no data on success, such as `Delete`, the response is - // `google.protobuf.Empty`. If the original method is standard - // `Get`/`Create`/`Update`, the response should be the resource. For other - // methods, the response should have the type `XxxResponse`, where `Xxx` is - // the original method name. For example, if the original method name is - // `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + // The normal, successful response of the operation. Response json.RawMessage `json:"response,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/apps/model.go b/service/apps/model.go index 3bc388448..1fa74eacc 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -1256,8 +1256,6 @@ type ComputeSize string const ComputeSizeLarge ComputeSize = `LARGE` -const ComputeSizeLiquid ComputeSize = `LIQUID` - const ComputeSizeMedium ComputeSize = `MEDIUM` // String representation for [fmt.Print] @@ -1268,11 +1266,11 @@ func (f *ComputeSize) String() string { // Set raw string value and validate it against allowed values func (f *ComputeSize) Set(v string) error { switch v { - case `LARGE`, `LIQUID`, `MEDIUM`: + case `LARGE`, `MEDIUM`: *f = ComputeSize(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "LARGE", "LIQUID", "MEDIUM"`, v) + return fmt.Errorf(`value "%s" is not one of "LARGE", "MEDIUM"`, v) } } @@ -1282,7 +1280,6 @@ func (f *ComputeSize) Set(v string) error { func (f *ComputeSize) Values() []ComputeSize { return []ComputeSize{ ComputeSizeLarge, - ComputeSizeLiquid, ComputeSizeMedium, } } diff --git a/service/catalog/api.go b/service/catalog/api.go index b0fed8f3a..74d4b5580 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -2317,12 +2317,11 @@ func NewRfa(client *client.DatabricksClient) *RfaAPI { } } -// Request for Access enables customers to request access to and manage access -// request destinations for Unity Catalog securables. +// Request for Access enables users to request access for Unity Catalog +// securables. 
// -// These APIs provide a standardized way to update, get, and request to access -// request destinations. Fine-grained authorization ensures that only users with -// appropriate permissions can manage access request destinations. +// These APIs provide a standardized way for securable owners (or users with +// MANAGE privileges) to manage access request destinations. type RfaAPI struct { rfaImpl } diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 6584eeee1..9a46b7263 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -340,6 +340,8 @@ func (a *catalogsImpl) Get(ctx context.Context, request GetCatalogRequest) (*Cat // indication that the end of results has been reached. func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -447,6 +449,8 @@ func (a *connectionsImpl) Get(ctx context.Context, request GetConnectionRequest) // indication that the end of results has been reached. func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -867,6 +871,8 @@ func (a *externalLocationsImpl) Get(ctx context.Context, request GetExternalLoca // indication that the end of results has been reached. func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1078,6 +1084,8 @@ func (a *functionsImpl) Get(ctx context.Context, request GetFunctionRequest) (*F // indication that the end of results has been reached. func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1245,6 +1253,8 @@ func (a *metastoresImpl) Get(ctx context.Context, request GetMetastoreRequest) ( // indication that the end of results has been reached. func (a *metastoresImpl) List(ctx context.Context, request ListMetastoresRequest) listing.Iterator[MetastoreInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListMetastoresRequest) (*ListMetastoresResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1986,6 +1996,8 @@ func (a *schemasImpl) Get(ctx context.Context, request GetSchemaRequest) (*Schem // indication that the end of results has been reached. 
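// A minimal sketch, not part of the generated file, of a caller hitting
// these List wrappers, assuming a configured WorkspaceClient `w` and a
// context `ctx`. The appended "MaxResults" entry matters because the field
// is tagged omitempty: without it, a zero-valued MaxResults would be dropped
// during serialization, whereas ForceSendFields makes the SDK send
// max_results=0 explicitly on each page fetch.
//
//	it := w.Schemas.List(ctx, catalog.ListSchemasRequest{CatalogName: "main"})
//	for it.HasNext(ctx) {
//		schema, err := it.Next(ctx)
//		if err != nil {
//			break // sketch-level handling; a real caller would surface err
//		}
//		fmt.Println(schema.FullName)
//	}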
func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2097,6 +2109,8 @@ func (a *storageCredentialsImpl) Get(ctx context.Context, request GetStorageCred // indication that the end of results has been reached. func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2205,6 +2219,8 @@ func (a *systemSchemasImpl) Enable(ctx context.Context, request EnableRequest) e // indication that the end of results has been reached. func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2338,6 +2354,8 @@ func (a *tablesImpl) Get(ctx context.Context, request GetTableRequest) (*TableIn // indication that the end of results has been reached. func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2645,6 +2663,8 @@ func (a *workspaceBindingsImpl) Get(ctx context.Context, request GetWorkspaceBin // indication that the end of results has been reached. func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req GetBindingsRequest) (*GetWorkspaceBindingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalGetBindings(ctx, req) diff --git a/service/catalog/interface.go b/service/catalog/interface.go index 32a2172ce..1314358c6 100755 --- a/service/catalog/interface.go +++ b/service/catalog/interface.go @@ -1028,12 +1028,11 @@ type ResourceQuotasService interface { ListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) } -// Request for Access enables customers to request access to and manage access -// request destinations for Unity Catalog securables. +// Request for Access enables users to request access for Unity Catalog +// securables. // -// These APIs provide a standardized way to update, get, and request to access -// request destinations. Fine-grained authorization ensures that only users with -// appropriate permissions can manage access request destinations. +// These APIs provide a standardized way for securable owners (or users with +// MANAGE privileges) to manage access request destinations. 
// // Deprecated: Do not use this interface, it will be removed in a future version of the SDK. type RfaService interface { diff --git a/service/catalog/model.go b/service/catalog/model.go index bbe43c2da..d38b06028 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -14,7 +14,7 @@ type AccessRequestDestinations struct { // permission to see all destinations. AreAnyDestinationsHidden bool `json:"are_any_destinations_hidden,omitempty"` // The access request destinations for the securable. - Destinations []NotificationDestination `json:"destinations"` + Destinations []NotificationDestination `json:"destinations,omitempty"` // The securable for which the access request destinations are being // retrieved. Securable Securable `json:"securable"` @@ -928,7 +928,7 @@ func (s ConnectionInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Next Id: 47 +// Next Id: 48 type ConnectionType string const ConnectionTypeBigquery ConnectionType = `BIGQUERY` @@ -947,8 +947,6 @@ const ConnectionTypeMysql ConnectionType = `MYSQL` const ConnectionTypeOracle ConnectionType = `ORACLE` -const ConnectionTypePalantir ConnectionType = `PALANTIR` - const ConnectionTypePostgresql ConnectionType = `POSTGRESQL` const ConnectionTypePowerBi ConnectionType = `POWER_BI` @@ -981,11 +979,11 @@ func (f *ConnectionType) String() string { // Set raw string value and validate it against allowed values func (f *ConnectionType) Set(v string) error { switch v { - case `BIGQUERY`, `DATABRICKS`, `GA4_RAW_DATA`, `GLUE`, `HIVE_METASTORE`, `HTTP`, `MYSQL`, `ORACLE`, `PALANTIR`, `POSTGRESQL`, `POWER_BI`, `REDSHIFT`, `SALESFORCE`, `SALESFORCE_DATA_CLOUD`, `SERVICENOW`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`, `TERADATA`, `UNKNOWN_CONNECTION_TYPE`, `WORKDAY_RAAS`: + case `BIGQUERY`, `DATABRICKS`, `GA4_RAW_DATA`, `GLUE`, `HIVE_METASTORE`, `HTTP`, `MYSQL`, `ORACLE`, `POSTGRESQL`, `POWER_BI`, `REDSHIFT`, `SALESFORCE`, `SALESFORCE_DATA_CLOUD`, `SERVICENOW`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`, `TERADATA`, `UNKNOWN_CONNECTION_TYPE`, `WORKDAY_RAAS`: *f = ConnectionType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "GA4_RAW_DATA", "GLUE", "HIVE_METASTORE", "HTTP", "MYSQL", "ORACLE", "PALANTIR", "POSTGRESQL", "POWER_BI", "REDSHIFT", "SALESFORCE", "SALESFORCE_DATA_CLOUD", "SERVICENOW", "SNOWFLAKE", "SQLDW", "SQLSERVER", "TERADATA", "UNKNOWN_CONNECTION_TYPE", "WORKDAY_RAAS"`, v) + return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "GA4_RAW_DATA", "GLUE", "HIVE_METASTORE", "HTTP", "MYSQL", "ORACLE", "POSTGRESQL", "POWER_BI", "REDSHIFT", "SALESFORCE", "SALESFORCE_DATA_CLOUD", "SERVICENOW", "SNOWFLAKE", "SQLDW", "SQLSERVER", "TERADATA", "UNKNOWN_CONNECTION_TYPE", "WORKDAY_RAAS"`, v) } } @@ -1002,7 +1000,6 @@ func (f *ConnectionType) Values() []ConnectionType { ConnectionTypeHttp, ConnectionTypeMysql, ConnectionTypeOracle, - ConnectionTypePalantir, ConnectionTypePostgresql, ConnectionTypePowerBi, ConnectionTypeRedshift, @@ -6912,7 +6909,7 @@ func (s Securable) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Latest kind: CONNECTION_AWS_SECRETS_MANAGER = 270; Next id:271 +// Latest kind: CONNECTION_SLACK_OAUTH_U2M_MAPPING = 272; Next id:273 type SecurableKind string const SecurableKindTableDbStorage SecurableKind = `TABLE_DB_STORAGE` @@ -6983,8 +6980,6 @@ const SecurableKindTableForeignNetsuite SecurableKind = `TABLE_FOREIGN_NETSUITE` const SecurableKindTableForeignOracle SecurableKind = `TABLE_FOREIGN_ORACLE` -const 
SecurableKindTableForeignPalantir SecurableKind = `TABLE_FOREIGN_PALANTIR` - const SecurableKindTableForeignPostgresql SecurableKind = `TABLE_FOREIGN_POSTGRESQL` const SecurableKindTableForeignRedshift SecurableKind = `TABLE_FOREIGN_REDSHIFT` @@ -7049,11 +7044,11 @@ func (f *SecurableKind) String() string { // Set raw string value and validate it against allowed values func (f *SecurableKind) Set(v string) error { switch v { - case `TABLE_DB_STORAGE`, `TABLE_DELTA`, `TABLE_DELTASHARING`, `TABLE_DELTASHARING_MUTABLE`, `TABLE_DELTA_EXTERNAL`, `TABLE_DELTA_ICEBERG_DELTASHARING`, `TABLE_DELTA_ICEBERG_MANAGED`, `TABLE_DELTA_UNIFORM_HUDI_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_MANAGED`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_SNOWFLAKE`, `TABLE_EXTERNAL`, `TABLE_FEATURE_STORE`, `TABLE_FEATURE_STORE_EXTERNAL`, `TABLE_FOREIGN_BIGQUERY`, `TABLE_FOREIGN_DATABRICKS`, `TABLE_FOREIGN_DELTASHARING`, `TABLE_FOREIGN_HIVE_METASTORE`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_VIEW`, `TABLE_FOREIGN_HIVE_METASTORE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_VIEW`, `TABLE_FOREIGN_MONGODB`, `TABLE_FOREIGN_MYSQL`, `TABLE_FOREIGN_NETSUITE`, `TABLE_FOREIGN_ORACLE`, `TABLE_FOREIGN_PALANTIR`, `TABLE_FOREIGN_POSTGRESQL`, `TABLE_FOREIGN_REDSHIFT`, `TABLE_FOREIGN_SALESFORCE`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING_VIEW`, `TABLE_FOREIGN_SNOWFLAKE`, `TABLE_FOREIGN_SQLDW`, `TABLE_FOREIGN_SQLSERVER`, `TABLE_FOREIGN_TERADATA`, `TABLE_FOREIGN_WORKDAY_RAAS`, `TABLE_ICEBERG_UNIFORM_MANAGED`, `TABLE_INTERNAL`, `TABLE_MANAGED_POSTGRESQL`, `TABLE_MATERIALIZED_VIEW`, `TABLE_MATERIALIZED_VIEW_DELTASHARING`, `TABLE_METRIC_VIEW`, `TABLE_METRIC_VIEW_DELTASHARING`, `TABLE_ONLINE_VECTOR_INDEX_DIRECT`, `TABLE_ONLINE_VECTOR_INDEX_REPLICA`, `TABLE_ONLINE_VIEW`, `TABLE_STANDARD`, `TABLE_STREAMING_LIVE_TABLE`, `TABLE_STREAMING_LIVE_TABLE_DELTASHARING`, `TABLE_SYSTEM`, `TABLE_SYSTEM_DELTASHARING`, `TABLE_VIEW`, `TABLE_VIEW_DELTASHARING`: + case `TABLE_DB_STORAGE`, `TABLE_DELTA`, `TABLE_DELTASHARING`, `TABLE_DELTASHARING_MUTABLE`, `TABLE_DELTA_EXTERNAL`, `TABLE_DELTA_ICEBERG_DELTASHARING`, `TABLE_DELTA_ICEBERG_MANAGED`, `TABLE_DELTA_UNIFORM_HUDI_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_MANAGED`, `TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_SNOWFLAKE`, `TABLE_EXTERNAL`, `TABLE_FEATURE_STORE`, `TABLE_FEATURE_STORE_EXTERNAL`, `TABLE_FOREIGN_BIGQUERY`, `TABLE_FOREIGN_DATABRICKS`, `TABLE_FOREIGN_DELTASHARING`, `TABLE_FOREIGN_HIVE_METASTORE`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_DBFS_VIEW`, `TABLE_FOREIGN_HIVE_METASTORE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_MANAGED`, 
`TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_EXTERNAL`, `TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_MANAGED`, `TABLE_FOREIGN_HIVE_METASTORE_VIEW`, `TABLE_FOREIGN_MONGODB`, `TABLE_FOREIGN_MYSQL`, `TABLE_FOREIGN_NETSUITE`, `TABLE_FOREIGN_ORACLE`, `TABLE_FOREIGN_POSTGRESQL`, `TABLE_FOREIGN_REDSHIFT`, `TABLE_FOREIGN_SALESFORCE`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING`, `TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING_VIEW`, `TABLE_FOREIGN_SNOWFLAKE`, `TABLE_FOREIGN_SQLDW`, `TABLE_FOREIGN_SQLSERVER`, `TABLE_FOREIGN_TERADATA`, `TABLE_FOREIGN_WORKDAY_RAAS`, `TABLE_ICEBERG_UNIFORM_MANAGED`, `TABLE_INTERNAL`, `TABLE_MANAGED_POSTGRESQL`, `TABLE_MATERIALIZED_VIEW`, `TABLE_MATERIALIZED_VIEW_DELTASHARING`, `TABLE_METRIC_VIEW`, `TABLE_METRIC_VIEW_DELTASHARING`, `TABLE_ONLINE_VECTOR_INDEX_DIRECT`, `TABLE_ONLINE_VECTOR_INDEX_REPLICA`, `TABLE_ONLINE_VIEW`, `TABLE_STANDARD`, `TABLE_STREAMING_LIVE_TABLE`, `TABLE_STREAMING_LIVE_TABLE_DELTASHARING`, `TABLE_SYSTEM`, `TABLE_SYSTEM_DELTASHARING`, `TABLE_VIEW`, `TABLE_VIEW_DELTASHARING`: *f = SecurableKind(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "TABLE_DB_STORAGE", "TABLE_DELTA", "TABLE_DELTASHARING", "TABLE_DELTASHARING_MUTABLE", "TABLE_DELTA_EXTERNAL", "TABLE_DELTA_ICEBERG_DELTASHARING", "TABLE_DELTA_ICEBERG_MANAGED", "TABLE_DELTA_UNIFORM_HUDI_EXTERNAL", "TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_MANAGED", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_SNOWFLAKE", "TABLE_EXTERNAL", "TABLE_FEATURE_STORE", "TABLE_FEATURE_STORE_EXTERNAL", "TABLE_FOREIGN_BIGQUERY", "TABLE_FOREIGN_DATABRICKS", "TABLE_FOREIGN_DELTASHARING", "TABLE_FOREIGN_HIVE_METASTORE", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_VIEW", "TABLE_FOREIGN_HIVE_METASTORE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_VIEW", "TABLE_FOREIGN_MONGODB", "TABLE_FOREIGN_MYSQL", "TABLE_FOREIGN_NETSUITE", "TABLE_FOREIGN_ORACLE", "TABLE_FOREIGN_PALANTIR", "TABLE_FOREIGN_POSTGRESQL", "TABLE_FOREIGN_REDSHIFT", "TABLE_FOREIGN_SALESFORCE", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING_VIEW", "TABLE_FOREIGN_SNOWFLAKE", "TABLE_FOREIGN_SQLDW", "TABLE_FOREIGN_SQLSERVER", "TABLE_FOREIGN_TERADATA", "TABLE_FOREIGN_WORKDAY_RAAS", "TABLE_ICEBERG_UNIFORM_MANAGED", "TABLE_INTERNAL", "TABLE_MANAGED_POSTGRESQL", "TABLE_MATERIALIZED_VIEW", "TABLE_MATERIALIZED_VIEW_DELTASHARING", "TABLE_METRIC_VIEW", "TABLE_METRIC_VIEW_DELTASHARING", "TABLE_ONLINE_VECTOR_INDEX_DIRECT", "TABLE_ONLINE_VECTOR_INDEX_REPLICA", "TABLE_ONLINE_VIEW", "TABLE_STANDARD", "TABLE_STREAMING_LIVE_TABLE", "TABLE_STREAMING_LIVE_TABLE_DELTASHARING", "TABLE_SYSTEM", "TABLE_SYSTEM_DELTASHARING", "TABLE_VIEW", "TABLE_VIEW_DELTASHARING"`, v) + return fmt.Errorf(`value "%s" is not one of "TABLE_DB_STORAGE", "TABLE_DELTA", "TABLE_DELTASHARING", "TABLE_DELTASHARING_MUTABLE", "TABLE_DELTA_EXTERNAL", "TABLE_DELTA_ICEBERG_DELTASHARING", "TABLE_DELTA_ICEBERG_MANAGED", "TABLE_DELTA_UNIFORM_HUDI_EXTERNAL", 
"TABLE_DELTA_UNIFORM_ICEBERG_EXTERNAL", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_DELTASHARING", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_EXTERNAL", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_HIVE_METASTORE_MANAGED", "TABLE_DELTA_UNIFORM_ICEBERG_FOREIGN_SNOWFLAKE", "TABLE_EXTERNAL", "TABLE_FEATURE_STORE", "TABLE_FEATURE_STORE_EXTERNAL", "TABLE_FOREIGN_BIGQUERY", "TABLE_FOREIGN_DATABRICKS", "TABLE_FOREIGN_DELTASHARING", "TABLE_FOREIGN_HIVE_METASTORE", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_SHALLOW_CLONE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_DBFS_VIEW", "TABLE_FOREIGN_HIVE_METASTORE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_EXTERNAL", "TABLE_FOREIGN_HIVE_METASTORE_SHALLOW_CLONE_MANAGED", "TABLE_FOREIGN_HIVE_METASTORE_VIEW", "TABLE_FOREIGN_MONGODB", "TABLE_FOREIGN_MYSQL", "TABLE_FOREIGN_NETSUITE", "TABLE_FOREIGN_ORACLE", "TABLE_FOREIGN_POSTGRESQL", "TABLE_FOREIGN_REDSHIFT", "TABLE_FOREIGN_SALESFORCE", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING", "TABLE_FOREIGN_SALESFORCE_DATA_CLOUD_FILE_SHARING_VIEW", "TABLE_FOREIGN_SNOWFLAKE", "TABLE_FOREIGN_SQLDW", "TABLE_FOREIGN_SQLSERVER", "TABLE_FOREIGN_TERADATA", "TABLE_FOREIGN_WORKDAY_RAAS", "TABLE_ICEBERG_UNIFORM_MANAGED", "TABLE_INTERNAL", "TABLE_MANAGED_POSTGRESQL", "TABLE_MATERIALIZED_VIEW", "TABLE_MATERIALIZED_VIEW_DELTASHARING", "TABLE_METRIC_VIEW", "TABLE_METRIC_VIEW_DELTASHARING", "TABLE_ONLINE_VECTOR_INDEX_DIRECT", "TABLE_ONLINE_VECTOR_INDEX_REPLICA", "TABLE_ONLINE_VIEW", "TABLE_STANDARD", "TABLE_STREAMING_LIVE_TABLE", "TABLE_STREAMING_LIVE_TABLE_DELTASHARING", "TABLE_SYSTEM", "TABLE_SYSTEM_DELTASHARING", "TABLE_VIEW", "TABLE_VIEW_DELTASHARING"`, v) } } @@ -7096,7 +7091,6 @@ func (f *SecurableKind) Values() []SecurableKind { SecurableKindTableForeignMysql, SecurableKindTableForeignNetsuite, SecurableKindTableForeignOracle, - SecurableKindTableForeignPalantir, SecurableKindTableForeignPostgresql, SecurableKindTableForeignRedshift, SecurableKindTableForeignSalesforce, diff --git a/service/compute/model.go b/service/compute/model.go index f93551947..dad3827b8 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -143,7 +143,7 @@ type AwsAttributes struct { // availability zone must be in the same region as the Databricks // deployment. For example, "us-west-2a" is not a valid zone id if the // Databricks deployment resides in the "us-east-1" region. This is an - // optional field at cluster creation, and if not specified, a default zone + // optional field at cluster creation, and if not specified, the zone "auto" // will be used. If the zone specified is "auto", will try to place cluster // in a zone with high availability, and will retry placement in a different // AZ if there is not enough capacity. 
@@ -2770,6 +2770,8 @@ const EventDetailsCauseAutorecovery EventDetailsCause = `AUTORECOVERY` const EventDetailsCauseAutoscale EventDetailsCause = `AUTOSCALE` +const EventDetailsCauseAutoscaleV2 EventDetailsCause = `AUTOSCALE_V2` + const EventDetailsCauseReplaceBadNodes EventDetailsCause = `REPLACE_BAD_NODES` const EventDetailsCauseUserRequest EventDetailsCause = `USER_REQUEST` @@ -2782,11 +2784,11 @@ func (f *EventDetailsCause) String() string { // Set raw string value and validate it against allowed values func (f *EventDetailsCause) Set(v string) error { switch v { - case `AUTORECOVERY`, `AUTOSCALE`, `REPLACE_BAD_NODES`, `USER_REQUEST`: + case `AUTORECOVERY`, `AUTOSCALE`, `AUTOSCALE_V2`, `REPLACE_BAD_NODES`, `USER_REQUEST`: *f = EventDetailsCause(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) + return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "AUTOSCALE_V2", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) } } @@ -2797,6 +2799,7 @@ func (f *EventDetailsCause) Values() []EventDetailsCause { return []EventDetailsCause{ EventDetailsCauseAutorecovery, EventDetailsCauseAutoscale, + EventDetailsCauseAutoscaleV2, EventDetailsCauseReplaceBadNodes, EventDetailsCauseUserRequest, } @@ -5559,8 +5562,6 @@ const TerminationReasonCodeDockerImageTooLargeForInstanceException TerminationRe const TerminationReasonCodeDockerInvalidOsException TerminationReasonCode = `DOCKER_INVALID_OS_EXCEPTION` -const TerminationReasonCodeDriverDnsResolutionFailure TerminationReasonCode = `DRIVER_DNS_RESOLUTION_FAILURE` - const TerminationReasonCodeDriverEviction TerminationReasonCode = `DRIVER_EVICTION` const TerminationReasonCodeDriverLaunchTimeout TerminationReasonCode = `DRIVER_LAUNCH_TIMEOUT` @@ -5709,10 +5710,6 @@ const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = ` const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` -const TerminationReasonCodeNoActivatedK8s TerminationReasonCode = `NO_ACTIVATED_K8S` - -const TerminationReasonCodeNoActivatedK8sTestingTag TerminationReasonCode = `NO_ACTIVATED_K8S_TESTING_TAG` - const TerminationReasonCodeNoMatchedK8s TerminationReasonCode = `NO_MATCHED_K8S` const TerminationReasonCodeNoMatchedK8sTestingTag TerminationReasonCode = `NO_MATCHED_K8S_TESTING_TAG` @@ -5737,8 +5734,6 @@ const TerminationReasonCodeSecretPermissionDenied TerminationReasonCode = `SECRE const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` -const TerminationReasonCodeSecurityAgentsFailedInitialVerification TerminationReasonCode = `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION` - const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` @@ -5811,11 +5806,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, 
`AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DBR_IMAGE_RESOLUTION_FAILURE`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_DNS_RESOLUTION_FAILURE`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNHEALTHY`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, 
`NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_ACTIVATED_K8S`, `NO_ACTIVATED_K8S_TESTING_TAG`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, 
`CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DBR_IMAGE_RESOLUTION_FAILURE`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNHEALTHY`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, 
`SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DBR_IMAGE_RESOLUTION_FAILURE", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_DNS_RESOLUTION_FAILURE", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNHEALTHY", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", 
"GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_ACTIVE_POD_QUOTA_EXCEEDED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_ACTIVATED_K8S", "NO_ACTIVATED_K8S_TESTING_TAG", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USAGE_POLICY_ENTITLEMENT_DENIED", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", 
"AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DBR_IMAGE_RESOLUTION_FAILURE", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNHEALTHY", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_ACTIVE_POD_QUOTA_EXCEEDED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", 
"NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USAGE_POLICY_ENTITLEMENT_DENIED", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } @@ -5888,7 +5883,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeDockerImagePullFailure, TerminationReasonCodeDockerImageTooLargeForInstanceException, TerminationReasonCodeDockerInvalidOsException, - TerminationReasonCodeDriverDnsResolutionFailure, TerminationReasonCodeDriverEviction, TerminationReasonCodeDriverLaunchTimeout, TerminationReasonCodeDriverNodeUnreachable, @@ -5963,8 +5957,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeNetworkCheckStorageFailureDueToMisconfig, TerminationReasonCodeNetworkConfigurationFailure, TerminationReasonCodeNfsMountFailure, - TerminationReasonCodeNoActivatedK8s, - TerminationReasonCodeNoActivatedK8sTestingTag, TerminationReasonCodeNoMatchedK8s, TerminationReasonCodeNoMatchedK8sTestingTag, TerminationReasonCodeNpipTunnelSetupFailure, @@ -5977,7 +5969,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeSecretCreationFailure, TerminationReasonCodeSecretPermissionDenied, TerminationReasonCodeSecretResolutionError, - TerminationReasonCodeSecurityAgentsFailedInitialVerification, TerminationReasonCodeSecurityDaemonRegistrationException, TerminationReasonCodeSelfBootstrapFailure, TerminationReasonCodeServerlessLongRunningTerminated, diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 2e1ad3b04..c21fb6d6d 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -33,6 +33,9 @@ type GenieInterface interface { // Deprecated: use [GenieAPIInterface.CreateMessage].Get() or [GenieAPIInterface.WaitGetMessageGenieCompleted] CreateMessageAndWait(ctx context.Context, genieCreateConversationMessageRequest GenieCreateConversationMessageRequest, options 
...retries.Option[GenieMessage]) (*GenieMessage, error) + // Creates a Genie space from a serialized payload. + CreateSpace(ctx context.Context, request GenieCreateSpaceRequest) (*GenieSpace, error) + // Delete a conversation. DeleteConversation(ctx context.Context, request GenieDeleteConversationRequest) error @@ -119,6 +122,9 @@ type GenieInterface interface { // Move a Genie Space to the trash. TrashSpaceBySpaceId(ctx context.Context, spaceId string) error + + // Updates a Genie space with a serialized payload. + UpdateSpace(ctx context.Context, request GenieUpdateSpaceRequest) (*GenieSpace, error) } func NewGenie(client *client.DatabricksClient) *GenieAPI { diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index 4e70904a0..d7b7aae2c 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -28,6 +28,17 @@ func (a *genieImpl) CreateMessage(ctx context.Context, request GenieCreateConver return &genieMessage, err } +func (a *genieImpl) CreateSpace(ctx context.Context, request GenieCreateSpaceRequest) (*GenieSpace, error) { + var genieSpace GenieSpace + path := "/api/2.0/genie/spaces" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &genieSpace) + return &genieSpace, err +} + func (a *genieImpl) DeleteConversation(ctx context.Context, request GenieDeleteConversationRequest) error { path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v", request.SpaceId, request.ConversationId) queryParams := make(map[string]any) @@ -176,6 +187,17 @@ func (a *genieImpl) TrashSpace(ctx context.Context, request GenieTrashSpaceReque return err } +func (a *genieImpl) UpdateSpace(ctx context.Context, request GenieUpdateSpaceRequest) (*GenieSpace, error) { + var genieSpace GenieSpace + path := fmt.Sprintf("/api/2.0/genie/spaces/%v", request.SpaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &genieSpace) + return &genieSpace, err +} + // unexported type that holds implementations of just Lakeview API methods type lakeviewImpl struct { client *client.DatabricksClient diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 8d97abbc7..decee88b3 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -20,6 +20,9 @@ type GenieService interface { // to respond. CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) + // Creates a Genie space from a serialized payload. + CreateSpace(ctx context.Context, request GenieCreateSpaceRequest) (*GenieSpace, error) + // Delete a conversation. DeleteConversation(ctx context.Context, request GenieDeleteConversationRequest) error @@ -70,6 +73,9 @@ type GenieService interface { // Move a Genie Space to the trash. TrashSpace(ctx context.Context, request GenieTrashSpaceRequest) error + + // Updates a Genie space with a serialized payload. + UpdateSpace(ctx context.Context, request GenieUpdateSpaceRequest) (*GenieSpace, error) } // These APIs provide specific management operations for Lakeview dashboards. 
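For context, a minimal usage sketch of the new workspace-level space round trip: create a space, fetch it back with the serialized export, then apply a full-replacement update. The payload strings and warehouse ID are placeholders, and it assumes the returned GenieSpace carries a SpaceId field; treat this as a sketch, not the canonical workflow.

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	// Create a space from a serialized export model (hypothetical payload).
	created, err := w.Genie.CreateSpace(ctx, dashboards.GenieCreateSpaceRequest{
		SerializedSpace: `{"version": 1}`,   // placeholder export payload
		WarehouseId:     "1234567890abcdef", // placeholder warehouse ID
		Title:           "Sales Q&A",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Fetch it back with the serialized export included; per the model docs
	// below, this requires at least CAN EDIT permission on the space.
	got, err := w.Genie.GetSpace(ctx, dashboards.GenieGetSpaceRequest{
		SpaceId:                created.SpaceId,
		IncludeSerializedSpace: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// UpdateSpace replaces the serialized contents wholesale.
	if _, err := w.Genie.UpdateSpace(ctx, dashboards.GenieUpdateSpaceRequest{
		SpaceId:         got.SpaceId,
		SerializedSpace: `{"version": 2}`, // placeholder replacement payload
	}); err != nil {
		log.Fatal(err)
	}
}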
diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 10f1b0605..997ade788 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -272,6 +272,29 @@ type GenieCreateConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } +type GenieCreateSpaceRequest struct { + // Optional description + Description string `json:"description,omitempty"` + // Parent folder path where the space will be registered + ParentPath string `json:"parent_path,omitempty"` + // Serialized export model for the space contents + SerializedSpace string `json:"serialized_space"` + // Optional title override + Title string `json:"title,omitempty"` + // Warehouse to associate with the new space + WarehouseId string `json:"warehouse_id"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieCreateSpaceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieCreateSpaceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type GenieDeleteConversationMessageRequest struct { // The ID associated with the conversation. ConversationId string `json:"-" url:"-"` @@ -404,8 +427,21 @@ type GenieGetQueryResultByAttachmentRequest struct { } type GenieGetSpaceRequest struct { + // Whether to include the serialized space export in the response. Requires + // at least CAN EDIT permission on the space. + IncludeSerializedSpace bool `json:"-" url:"include_serialized_space,omitempty"` // The ID associated with the Genie space SpaceId string `json:"-" url:"-"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieGetSpaceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieGetSpaceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type GenieListConversationMessagesRequest struct { @@ -669,6 +705,29 @@ type GenieTrashSpaceRequest struct { SpaceId string `json:"-" url:"-"` } +type GenieUpdateSpaceRequest struct { + // Optional description + Description string `json:"description,omitempty"` + // Serialized export model for the space contents (full replacement) + SerializedSpace string `json:"serialized_space,omitempty"` + // Genie space ID + SpaceId string `json:"-" url:"-"` + // Optional title override + Title string `json:"title,omitempty"` + // Optional warehouse override + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieUpdateSpaceRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieUpdateSpaceRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type GetDashboardRequest struct { // UUID identifying the dashboard. 
DashboardId string `json:"-" url:"-"` @@ -972,8 +1031,6 @@ const MessageErrorTypeInvalidCertifiedAnswerFunctionException MessageErrorType = const MessageErrorTypeInvalidCertifiedAnswerIdentifierException MessageErrorType = `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION` -const MessageErrorTypeInvalidChatCompletionArgumentsJsonException MessageErrorType = `INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION` - const MessageErrorTypeInvalidChatCompletionJsonException MessageErrorType = `INVALID_CHAT_COMPLETION_JSON_EXCEPTION` const MessageErrorTypeInvalidCompletionRequestException MessageErrorType = `INVALID_COMPLETION_REQUEST_EXCEPTION` @@ -1028,6 +1085,8 @@ const MessageErrorTypeUnexpectedReplyProcessException MessageErrorType = `UNEXPE const MessageErrorTypeUnknownAiModel MessageErrorType = `UNKNOWN_AI_MODEL` +const MessageErrorTypeUnsupportedConversationTypeException MessageErrorType = `UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION` + const MessageErrorTypeWarehouseAccessMissingException MessageErrorType = `WAREHOUSE_ACCESS_MISSING_EXCEPTION` const MessageErrorTypeWarehouseNotFoundException MessageErrorType = `WAREHOUSE_NOT_FOUND_EXCEPTION` @@ -1040,11 +1099,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `DESCRIBE_QUERY_INVALID_SQL_ERROR`, `DESCRIBE_QUERY_TIMEOUT`, `DESCRIBE_QUERY_UNEXPECTED_FAILURE`, `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION`, `INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION`, `INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION`, `INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION`, `INVALID_SQL_UNKNOWN_TABLE_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_ATTACHMENT_TOO_LONG_ERROR`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `MISSING_SQL_QUERY_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, 
`TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `DESCRIBE_QUERY_INVALID_SQL_ERROR`, `DESCRIBE_QUERY_TIMEOUT`, `DESCRIBE_QUERY_UNEXPECTED_FAILURE`, `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION`, `INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION`, `INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION`, `INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION`, `INVALID_SQL_UNKNOWN_TABLE_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_ATTACHMENT_TOO_LONG_ERROR`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `MISSING_SQL_QUERY_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "DESCRIBE_QUERY_INVALID_SQL_ERROR", "DESCRIBE_QUERY_TIMEOUT", "DESCRIBE_QUERY_UNEXPECTED_FAILURE", "EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", 
"INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION", "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION", "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION", "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION", "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_ATTACHMENT_TOO_LONG_ERROR", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "MISSING_SQL_QUERY_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "DESCRIBE_QUERY_INVALID_SQL_ERROR", "DESCRIBE_QUERY_TIMEOUT", "DESCRIBE_QUERY_UNEXPECTED_FAILURE", "EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION", "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION", "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION", "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION", "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_ATTACHMENT_TOO_LONG_ERROR", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "MISSING_SQL_QUERY_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", 
"RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } @@ -1083,7 +1142,6 @@ func (f *MessageErrorType) Values() []MessageErrorType { MessageErrorTypeInternalCatalogPathOverlapException, MessageErrorTypeInvalidCertifiedAnswerFunctionException, MessageErrorTypeInvalidCertifiedAnswerIdentifierException, - MessageErrorTypeInvalidChatCompletionArgumentsJsonException, MessageErrorTypeInvalidChatCompletionJsonException, MessageErrorTypeInvalidCompletionRequestException, MessageErrorTypeInvalidFunctionCallException, @@ -1111,6 +1169,7 @@ func (f *MessageErrorType) Values() []MessageErrorType { MessageErrorTypeTooManyTablesException, MessageErrorTypeUnexpectedReplyProcessException, MessageErrorTypeUnknownAiModel, + MessageErrorTypeUnsupportedConversationTypeException, MessageErrorTypeWarehouseAccessMissingException, MessageErrorTypeWarehouseNotFoundException, } @@ -1453,6 +1512,8 @@ type TextAttachment struct { Content string `json:"content,omitempty"` Id string `json:"id,omitempty"` + // Purpose/intent of this text attachment + Purpose TextAttachmentPurpose `json:"purpose,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -1465,6 +1526,41 @@ func (s TextAttachment) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Purpose/intent of a text attachment +type TextAttachmentPurpose string + +const TextAttachmentPurposeFollowUpQuestion TextAttachmentPurpose = `FOLLOW_UP_QUESTION` + +// String representation for [fmt.Print] +func (f *TextAttachmentPurpose) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TextAttachmentPurpose) Set(v string) error { + switch v { + case `FOLLOW_UP_QUESTION`: + *f = TextAttachmentPurpose(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FOLLOW_UP_QUESTION"`, v) + } +} + +// Values returns all possible values for TextAttachmentPurpose. +// +// There is no guarantee on the order of the values in the slice. +func (f *TextAttachmentPurpose) Values() []TextAttachmentPurpose { + return []TextAttachmentPurpose{ + TextAttachmentPurposeFollowUpQuestion, + } +} + +// Type always returns TextAttachmentPurpose to satisfy [pflag.Value] interface +func (f *TextAttachmentPurpose) Type() string { + return "TextAttachmentPurpose" +} + type TrashDashboardRequest struct { // UUID identifying the dashboard. DashboardId string `json:"-" url:"-"` diff --git a/service/database/model.go b/service/database/model.go index fdf21c58b..c0183deab 100755 --- a/service/database/model.go +++ b/service/database/model.go @@ -721,6 +721,8 @@ func (s ListSyncedDatabaseTablesResponse) MarshalJSON() ([]byte, error) { // SyncedDatabaseTable. Note that other fields of pipeline are still inferred by // table def internally type NewPipelineSpec struct { + // Budget policy to set on the newly created pipeline. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // This field needs to be specified if the destination catalog is a managed // postgres catalog. 
// diff --git a/service/iam/api.go b/service/iam/api.go index e389310da..0d2c4d295 100755 --- a/service/iam/api.go +++ b/service/iam/api.go @@ -109,15 +109,19 @@ type AccountGroupsV2Interface interface { Get(ctx context.Context, request GetAccountGroupRequest) (*AccountGroup, error) // Gets all details of the groups associated with the Databricks account. As of - // 08/22/2025, this endpoint will not return members. Instead, members should be - // retrieved by iterating through `Get group details`. + // 08/22/2025, this endpoint will no longer return members. Instead, members + // should be retrieved by iterating through `Get group details`. Existing + // accounts that rely on this attribute will not be impacted and will continue + // receiving member data as before. // // This method is generated by Databricks SDK Code Generator. List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[AccountGroup] // Gets all details of the groups associated with the Databricks account. As of - // 08/22/2025, this endpoint will not return members. Instead, members should be - // retrieved by iterating through `Get group details`. + // 08/22/2025, this endpoint will no longer return members. Instead, members + // should be retrieved by iterating through `Get group details`. Existing + // accounts that rely on this attribute will not be impacted and will continue + // receiving member data as before. // // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]AccountGroup, error) diff --git a/service/iam/impl.go b/service/iam/impl.go index 1cf2aa32b..a07c5b79c 100755 --- a/service/iam/impl.go +++ b/service/iam/impl.go @@ -134,8 +134,10 @@ func (a *accountGroupsV2Impl) Get(ctx context.Context, request GetAccountGroupRe } // Gets all details of the groups associated with the Databricks account. As of -// 08/22/2025, this endpoint will not return members. Instead, members should be -// retrieved by iterating through `Get group details`. +// 08/22/2025, this endpoint will no longer return members. Instead, members +// should be retrieved by iterating through `Get group details`. Existing +// accounts that rely on this attribute will not be impacted and will continue +// receiving member data as before. func (a *accountGroupsV2Impl) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[AccountGroup] { request.StartIndex = 1 // SCIM offset starts from 1 @@ -165,8 +167,10 @@ func (a *accountGroupsV2Impl) List(ctx context.Context, request ListAccountGroup } // Gets all details of the groups associated with the Databricks account. As of -// 08/22/2025, this endpoint will not return members. Instead, members should be -// retrieved by iterating through `Get group details`. +// 08/22/2025, this endpoint will no longer return members. Instead, members +// should be retrieved by iterating through `Get group details`. Existing +// accounts that rely on this attribute will not be impacted and will continue +// receiving member data as before. 
func (a *accountGroupsV2Impl) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]AccountGroup, error) { iterator := a.List(ctx, request) return listing.ToSliceN[AccountGroup, int64](ctx, iterator, request.Count) diff --git a/service/iam/interface.go b/service/iam/interface.go index 5754a981c..3bf16c768 100755 --- a/service/iam/interface.go +++ b/service/iam/interface.go @@ -84,8 +84,10 @@ type AccountGroupsV2Service interface { Get(ctx context.Context, request GetAccountGroupRequest) (*AccountGroup, error) // Gets all details of the groups associated with the Databricks account. As - // of 08/22/2025, this endpoint will not return members. Instead, members - // should be retrieved by iterating through `Get group details`. + // of 08/22/2025, this endpoint will no longer return members. Instead, + // members should be retrieved by iterating through `Get group details`. + // Existing accounts that rely on this attribute will not be impacted and + // will continue receiving member data as before. List(ctx context.Context, request ListAccountGroupsRequest) (*ListAccountGroupsResponse, error) // Partially updates the details of a group. diff --git a/service/jobs/api.go b/service/jobs/api.go index b5f4d48f9..77fd11392 100755 --- a/service/jobs/api.go +++ b/service/jobs/api.go @@ -244,7 +244,7 @@ func NewJobs(client *client.DatabricksClient) *JobsAPI { // Databricks manages the task orchestration, cluster management, monitoring, // and error reporting for all of your jobs. You can run your jobs immediately // or periodically through an easy-to-use scheduling system. You can implement -// job tasks using notebooks, JARS, Delta Live Tables pipelines, or Python, +// job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, // Scala, Spark submit, and Java applications. // // You should never hard code secrets or store them in plain text. Use the diff --git a/service/jobs/interface.go b/service/jobs/interface.go index 7b90299e4..c1c757af2 100755 --- a/service/jobs/interface.go +++ b/service/jobs/interface.go @@ -14,7 +14,7 @@ import ( // Databricks manages the task orchestration, cluster management, monitoring, // and error reporting for all of your jobs. You can run your jobs immediately // or periodically through an easy-to-use scheduling system. You can implement -// job tasks using notebooks, JARS, Delta Live Tables pipelines, or Python, +// job tasks using notebooks, JARS, Spark Declarative Pipelines, or Python, // Scala, Spark submit, and Java applications. // // You should never hard code secrets or store them in plain text. Use the diff --git a/service/jobs/model.go b/service/jobs/model.go index 3954a27d3..0a903fba6 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -667,11 +667,12 @@ type CreateJob struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // A list of task execution environment specifications that can be - // referenced by serverless tasks of this job. An environment is required to - // be present for serverless tasks. For serverless notebook tasks, the - // environment is accessible in the notebook environment panel. For other - // serverless tasks, the task environment is required to be specified using - // environment_key in the task settings. + // referenced by serverless tasks of this job. For serverless notebook + // tasks, if the environment_key is not specified, the notebook environment + // will be used if present. 
If a jobs environment is specified, it will + // override the notebook environment. For other serverless tasks, the task + // environment is required to be specified using environment_key in the task + // settings. Environments []JobEnvironment `json:"environments,omitempty"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -2074,11 +2075,12 @@ type JobSettings struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` // A list of task execution environment specifications that can be - // referenced by serverless tasks of this job. An environment is required to - // be present for serverless tasks. For serverless notebook tasks, the - // environment is accessible in the notebook environment panel. For other - // serverless tasks, the task environment is required to be specified using - // environment_key in the task settings. + // referenced by serverless tasks of this job. For serverless notebook + // tasks, if the environment_key is not specified, the notebook environment + // will be used if present. If a jobs environment is specified, it will + // override the notebook environment. For other serverless tasks, the task + // environment is required to be specified using environment_key in the task + // settings. Environments []JobEnvironment `json:"environments,omitempty"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -5226,7 +5228,7 @@ type TableUpdateTriggerConfiguration struct { MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` // A list of tables to monitor for changes. The table name must be in the // format `catalog_name.schema_name.table_name`. - TableNames []string `json:"table_names,omitempty"` + TableNames []string `json:"table_names"` // If set, the trigger starts a run only after no table updates have // occurred for the specified time and can be used to wait for a series of // table updates before triggering a run. 
The minimum allowed value is 60 diff --git a/service/marketplace/impl.go b/service/marketplace/impl.go index 459257581..87f640ad0 100755 --- a/service/marketplace/impl.go +++ b/service/marketplace/impl.go @@ -10,7 +10,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/useragent" - "github.com/google/uuid" ) // unexported type that holds implementations of just ConsumerFulfillments API methods @@ -953,9 +952,6 @@ func (a *providerPersonalizationRequestsImpl) internalList(ctx context.Context, func (a *providerPersonalizationRequestsImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { var updatePersonalizationRequestResponse UpdatePersonalizationRequestResponse - if request.RequestId == "" { - request.RequestId = uuid.New().String() - } path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v/personalization-requests/%v/request-status", request.ListingId, request.RequestId) queryParams := make(map[string]any) headers := make(map[string]string) diff --git a/service/marketplace/model.go b/service/marketplace/model.go index a376f6607..97b76d071 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -1831,7 +1831,8 @@ type PersonalizationRequest struct { ProviderId string `json:"provider_id,omitempty"` RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"` - + // Share information is required for data listings but should be + // empty/ignored for non-data listings (MCP and App). Share *ShareInfo `json:"share,omitempty"` Status PersonalizationRequestStatus `json:"status,omitempty"` diff --git a/service/ml/api.go b/service/ml/api.go index 2ce87a5c9..e90ad71a4 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -356,6 +356,9 @@ func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, exper type FeatureEngineeringInterface interface { + // Batch create materialized features. + BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) + // Create a Feature. 
CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) diff --git a/service/ml/impl.go b/service/ml/impl.go index 08bfd6aaa..ebeaaec53 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -592,6 +592,17 @@ type featureEngineeringImpl struct { client *client.DatabricksClient } +func (a *featureEngineeringImpl) BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) { + var batchCreateMaterializedFeaturesResponse BatchCreateMaterializedFeaturesResponse + path := "/api/2.0/feature-engineering/materialized-features:batchCreate" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &batchCreateMaterializedFeaturesResponse) + return &batchCreateMaterializedFeaturesResponse, err +} + func (a *featureEngineeringImpl) CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) { var feature Feature path := "/api/2.0/feature-engineering/features" diff --git a/service/ml/interface.go b/service/ml/interface.go index 7f2471d83..8d0f18413 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -257,6 +257,9 @@ type ExperimentsService interface { // Deprecated: Do not use this interface, it will be removed in a future version of the SDK. type FeatureEngineeringService interface { + // Batch create materialized features. + BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) + // Create a Feature. CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) diff --git a/service/ml/model.go b/service/ml/model.go index 190633a41..63fe93a62 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -238,6 +238,16 @@ type ApproveTransitionRequestResponse struct { Activity *Activity `json:"activity,omitempty"` } +type BatchCreateMaterializedFeaturesRequest struct { + // The requests to create materialized features. + Requests []CreateMaterializedFeatureRequest `json:"requests"` +} + +type BatchCreateMaterializedFeaturesResponse struct { + // The created materialized features with assigned IDs. + MaterializedFeatures []MaterializedFeature `json:"materialized_features,omitempty"` +} + // An action that a user (with sufficient permissions) could take on an activity // or comment. // @@ -1160,6 +1170,13 @@ type Feature struct { Function Function `json:"function"` // The input columns from which the feature is computed. Inputs []string `json:"inputs"` + // WARNING: This field is primarily intended for internal use by Databricks + // systems and is automatically populated when features are created through + // Databricks notebooks or jobs. Users should not manually set this field as + // incorrect values may lead to inaccurate lineage tracking or unexpected + // behavior. This field will be set by the feature-engineering client and + // should be left unset by SDK and Terraform users. + LineageContext *LineageContext `json:"lineage_context,omitempty"` // The data source of the feature. Source DataSource `json:"source"` // The time window in which the feature is computed. @@ -1727,6 +1744,23 @@ type InputTag struct { Value string `json:"value"` } +type JobContext struct { + // The job ID where this API was invoked.
+ JobId int64 `json:"job_id,omitempty"` + // The job run ID where this API was invoked. + JobRunId int64 `json:"job_run_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *JobContext) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobContext) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type JobSpec struct { // The personal access token used to authorize webhook's job runs. AccessToken string `json:"access_token"` @@ -1767,6 +1801,26 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Lineage context information for tracking where an API was invoked. This will +// allow us to track lineage, which currently uses caller entity information for +// use across the Lineage Client and Observability in Lumberjack. +type LineageContext struct { + // Job context information including job ID and run ID. + JobContext *JobContext `json:"job_context,omitempty"` + // The notebook ID where this API was invoked. + NotebookId int64 `json:"notebook_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *LineageContext) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LineageContext) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Feature for model version. ([ML-57150] Renamed from Feature to LinkedFeature) type LinkedFeature struct { // Feature name @@ -2411,7 +2465,7 @@ type MaterializedFeature struct { OfflineStoreConfig *OfflineStoreConfig `json:"offline_store_config,omitempty"` - OnlineStoreConfig *OnlineStore `json:"online_store_config,omitempty"` + OnlineStoreConfig *OnlineStoreConfig `json:"online_store_config,omitempty"` // The schedule state of the materialization pipeline. PipelineScheduleState MaterializedFeaturePipelineScheduleState `json:"pipeline_schedule_state,omitempty"` // The fully qualified Unity Catalog path to the table containing the @@ -2800,6 +2854,20 @@ func (s OnlineStore) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Configuration for online store destination. +type OnlineStoreConfig struct { + // The Unity Catalog catalog name. This name is also used as the Lakebase + // logical database name. + CatalogName string `json:"catalog_name"` + // The name of the target online store. + OnlineStoreName string `json:"online_store_name"` + // The Unity Catalog schema name. + SchemaName string `json:"schema_name"` + // Prefix for Unity Catalog table name. The materialized feature will be + // stored in a Lakebase table with this prefix and a generated postfix. + TableNamePrefix string `json:"table_name_prefix"` +} + type OnlineStoreState string const OnlineStoreStateAvailable OnlineStoreState = `AVAILABLE` diff --git a/service/pipelines/api.go b/service/pipelines/api.go index d843e562a..7935f5eb8 100755 --- a/service/pipelines/api.go +++ b/service/pipelines/api.go @@ -1,19 +1,20 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. +// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. // -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. 
You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. +// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. package pipelines import ( @@ -37,12 +38,14 @@ type PipelinesInterface interface { // If successful, this method returns the ID of the new pipeline. Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) - // Deletes a pipeline. Deleting a pipeline is a permanent action that stops and - // removes the pipeline and its tables. You cannot undo this action. + // Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline + // deletion will cascade to all pipeline tables. Please reach out to Databricks + // support for assistance to undo this action. Delete(ctx context.Context, request DeletePipelineRequest) error - // Deletes a pipeline. Deleting a pipeline is a permanent action that stops and - // removes the pipeline and its tables. You cannot undo this action. + // Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline + // deletion will cascade to all pipeline tables. Please reach out to Databricks + // support for assistance to undo this action. DeleteByPipelineId(ctx context.Context, pipelineId string) error // Get a pipeline. @@ -84,12 +87,12 @@ type PipelinesInterface interface { // Retrieves events for a pipeline. ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. // // This method is generated by Databricks SDK Code Generator. ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. // // This method is generated by Databricks SDK Code Generator. ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) @@ -156,20 +159,21 @@ func NewPipelines(client *client.DatabricksClient) *PipelinesAPI { } } -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. +// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. 
// -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. +// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. type PipelinesAPI struct { pipelinesImpl } @@ -228,8 +232,9 @@ func (w *WaitGetPipelineIdle[R]) GetWithTimeout(timeout time.Duration) (*GetPipe return w.Poll(timeout, w.callback) } -// Deletes a pipeline. Deleting a pipeline is a permanent action that stops and -// removes the pipeline and its tables. You cannot undo this action. +// Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline +// deletion will cascade to all pipeline tables. Please reach out to Databricks +// support for assistance to undo this action. func (a *PipelinesAPI) DeleteByPipelineId(ctx context.Context, pipelineId string) error { return a.pipelinesImpl.Delete(ctx, DeletePipelineRequest{ PipelineId: pipelineId, diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index 1147be7b8..15b8bf25e 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -118,7 +118,7 @@ func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request return &listPipelineEventsResponse, err } -// Lists pipelines defined in the Delta Live Tables system. +// Lists pipelines defined in the Spark Declarative Pipelines system. func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { @@ -143,7 +143,7 @@ func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelines return iterator } -// Lists pipelines defined in the Delta Live Tables system. +// Lists pipelines defined in the Spark Declarative Pipelines system. 
func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { iterator := a.ListPipelines(ctx, request) return listing.ToSlice[PipelineStateInfo](ctx, iterator) diff --git a/service/pipelines/interface.go b/service/pipelines/interface.go index 31816b10d..81202dc0c 100755 --- a/service/pipelines/interface.go +++ b/service/pipelines/interface.go @@ -6,20 +6,21 @@ import ( "context" ) -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. +// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. // -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. +// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. // // Deprecated: Do not use this interface, it will be removed in a future version of the SDK. type PipelinesService interface { @@ -29,8 +30,9 @@ type PipelinesService interface { // pipeline. Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) - // Deletes a pipeline. Deleting a pipeline is a permanent action that stops - // and removes the pipeline and its tables. You cannot undo this action. + // Deletes a pipeline. If the pipeline publishes to Unity Catalog, pipeline + // deletion will cascade to all pipeline tables. Please reach out to + // Databricks support for assistance in undoing this action. Delete(ctx context.Context, request DeletePipelineRequest) error // Get a pipeline. @@ -49,7 +51,7 @@ type PipelinesService interface { // Retrieves events for a pipeline. ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) // List updates for an active pipeline.
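As a quick caller-side illustration of the interface methods above, here is a minimal sketch that lists pipelines and then deletes one by ID. It assumes default environment-based authentication for the workspace client, and the pipeline ID is hypothetical; the delete comment mirrors the revised Unity Catalog cascade semantics.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	// ListPipelinesAll drains the paginated iterator into a slice.
	all, err := w.Pipelines.ListPipelinesAll(ctx, pipelines.ListPipelinesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range all {
		fmt.Printf("%s (%s): %s\n", p.Name, p.PipelineId, p.State)
	}
	// Per the revised doc comment: if the pipeline publishes to Unity Catalog,
	// deletion cascades to all pipeline tables.
	if err := w.Pipelines.DeleteByPipelineId(ctx, "1234-abcd-5678"); err != nil { // hypothetical ID
		log.Fatal(err)
	}
}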
diff --git a/service/pipelines/model.go b/service/pipelines/model.go index d3a5679d0..d91dcb000 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -9,6 +9,24 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" ) +type ConnectionParameters struct { + // Source catalog for initial connection. This is necessary for schema + // exploration in some database systems like Oracle, and optional but + // nice-to-have in some other database systems like Postgres. For Oracle + // databases, this maps to a service name. + SourceCatalog string `json:"source_catalog,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ConnectionParameters) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ConnectionParameters) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type CreatePipeline struct { // If false, deployment will fail if name conflicts with that of another // pipeline. @@ -567,13 +585,16 @@ type IngestionGatewayPipelineDefinition struct { // Immutable. The Unity Catalog connection that this gateway pipeline uses // to communicate with the source. ConnectionName string `json:"connection_name"` + // Optional, Internal. Parameters required to establish an initial + // connection with the source. + ConnectionParameters *ConnectionParameters `json:"connection_parameters,omitempty"` // Required, Immutable. The name of the catalog for the gateway pipeline's // storage location. GatewayStorageCatalog string `json:"gateway_storage_catalog"` // Optional. The Unity Catalog-compatible name for the gateway storage // location. This is the destination to use for the data that is extracted - // by the gateway. Delta Live Tables system will automatically create the - // storage location under the catalog and schema. + // by the gateway. The Spark Declarative Pipelines system will automatically + // create the storage location under the catalog and schema. GatewayStorageName string `json:"gateway_storage_name,omitempty"` // Required, Immutable. The name of the schema for the gateway pipeline's // storage location. @@ -595,6 +616,12 @@ type IngestionPipelineDefinition struct { // to communicate with the source. This is used with connectors for // applications like Salesforce, Workday, and so on. ConnectionName string `json:"connection_name,omitempty"` + // Immutable. If set to true, the pipeline will ingest tables from the UC + // foreign catalogs directly without the need to specify a UC connection or + // ingestion gateway. The `source_catalog` fields in objects of + // IngestionConfig are interpreted as the UC foreign catalogs to ingest + // from. + IngestFromUcForeignCatalog bool `json:"ingest_from_uc_foreign_catalog,omitempty"` // Immutable. Identifier for the gateway that is used by this ingestion // pipeline to communicate with the source database. This is used with // connectors to databases like SQL Server.
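To make the new ConnectionParameters field concrete, here is a sketch of a gateway pipeline spec for an Oracle source. All names are placeholders, and it assumes the existing CreatePipeline.GatewayDefinition field carries the definition, as in current SDK releases; per the field comment above, source_catalog maps to an Oracle service name.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func main() {
	// Gateway pipeline spec for an Oracle source.
	spec := pipelines.CreatePipeline{
		Name: "oracle-ingestion-gateway", // placeholder pipeline name
		GatewayDefinition: &pipelines.IngestionGatewayPipelineDefinition{
			ConnectionName: "oracle-fin-conn", // placeholder UC connection
			ConnectionParameters: &pipelines.ConnectionParameters{
				SourceCatalog: "ORCLPDB1", // placeholder Oracle service name
			},
			GatewayStorageCatalog: "main",
			GatewayStorageSchema:  "ingest_staging",
		},
	}
	// Print the request payload that would be sent to the Create endpoint.
	out, err := json.MarshalIndent(spec, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

For Postgres-like sources the ConnectionParameters block can be omitted entirely, since the field comment marks the source catalog as optional there.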
@@ -718,24 +745,14 @@ type IngestionSourceType string const IngestionSourceTypeBigquery IngestionSourceType = `BIGQUERY` -const IngestionSourceTypeConfluence IngestionSourceType = `CONFLUENCE` - const IngestionSourceTypeDynamics365 IngestionSourceType = `DYNAMICS365` const IngestionSourceTypeForeignCatalog IngestionSourceType = `FOREIGN_CATALOG` const IngestionSourceTypeGa4RawData IngestionSourceType = `GA4_RAW_DATA` -const IngestionSourceTypeGoogleAds IngestionSourceType = `GOOGLE_ADS` - -const IngestionSourceTypeGuidewire IngestionSourceType = `GUIDEWIRE` - -const IngestionSourceTypeHubspot IngestionSourceType = `HUBSPOT` - const IngestionSourceTypeManagedPostgresql IngestionSourceType = `MANAGED_POSTGRESQL` -const IngestionSourceTypeMetaMarketing IngestionSourceType = `META_MARKETING` - const IngestionSourceTypeMysql IngestionSourceType = `MYSQL` const IngestionSourceTypeNetsuite IngestionSourceType = `NETSUITE` @@ -744,30 +761,18 @@ const IngestionSourceTypeOracle IngestionSourceType = `ORACLE` const IngestionSourceTypePostgresql IngestionSourceType = `POSTGRESQL` -const IngestionSourceTypeRedshift IngestionSourceType = `REDSHIFT` - const IngestionSourceTypeSalesforce IngestionSourceType = `SALESFORCE` -const IngestionSourceTypeSalesforceMarketingCloud IngestionSourceType = `SALESFORCE_MARKETING_CLOUD` - const IngestionSourceTypeServicenow IngestionSourceType = `SERVICENOW` const IngestionSourceTypeSharepoint IngestionSourceType = `SHAREPOINT` -const IngestionSourceTypeSqldw IngestionSourceType = `SQLDW` - const IngestionSourceTypeSqlserver IngestionSourceType = `SQLSERVER` const IngestionSourceTypeTeradata IngestionSourceType = `TERADATA` -const IngestionSourceTypeTiktokAds IngestionSourceType = `TIKTOK_ADS` - -const IngestionSourceTypeWorkdayHcm IngestionSourceType = `WORKDAY_HCM` - const IngestionSourceTypeWorkdayRaas IngestionSourceType = `WORKDAY_RAAS` -const IngestionSourceTypeZendesk IngestionSourceType = `ZENDESK` - // String representation for [fmt.Print] func (f *IngestionSourceType) String() string { return string(*f) @@ -776,11 +781,11 @@ func (f *IngestionSourceType) String() string { // Set raw string value and validate it against allowed values func (f *IngestionSourceType) Set(v string) error { switch v { - case `BIGQUERY`, `CONFLUENCE`, `DYNAMICS365`, `FOREIGN_CATALOG`, `GA4_RAW_DATA`, `GOOGLE_ADS`, `GUIDEWIRE`, `HUBSPOT`, `MANAGED_POSTGRESQL`, `META_MARKETING`, `MYSQL`, `NETSUITE`, `ORACLE`, `POSTGRESQL`, `REDSHIFT`, `SALESFORCE`, `SALESFORCE_MARKETING_CLOUD`, `SERVICENOW`, `SHAREPOINT`, `SQLDW`, `SQLSERVER`, `TERADATA`, `TIKTOK_ADS`, `WORKDAY_HCM`, `WORKDAY_RAAS`, `ZENDESK`: + case `BIGQUERY`, `DYNAMICS365`, `FOREIGN_CATALOG`, `GA4_RAW_DATA`, `MANAGED_POSTGRESQL`, `MYSQL`, `NETSUITE`, `ORACLE`, `POSTGRESQL`, `SALESFORCE`, `SERVICENOW`, `SHAREPOINT`, `SQLSERVER`, `TERADATA`, `WORKDAY_RAAS`: *f = IngestionSourceType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "CONFLUENCE", "DYNAMICS365", "FOREIGN_CATALOG", "GA4_RAW_DATA", "GOOGLE_ADS", "GUIDEWIRE", "HUBSPOT", "MANAGED_POSTGRESQL", "META_MARKETING", "MYSQL", "NETSUITE", "ORACLE", "POSTGRESQL", "REDSHIFT", "SALESFORCE", "SALESFORCE_MARKETING_CLOUD", "SERVICENOW", "SHAREPOINT", "SQLDW", "SQLSERVER", "TERADATA", "TIKTOK_ADS", "WORKDAY_HCM", "WORKDAY_RAAS", "ZENDESK"`, v) + return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DYNAMICS365", "FOREIGN_CATALOG", "GA4_RAW_DATA", "MANAGED_POSTGRESQL", "MYSQL", "NETSUITE", "ORACLE", "POSTGRESQL", "SALESFORCE", "SERVICENOW", "SHAREPOINT", 
"SQLSERVER", "TERADATA", "WORKDAY_RAAS"`, v) } } @@ -790,31 +795,20 @@ func (f *IngestionSourceType) Set(v string) error { func (f *IngestionSourceType) Values() []IngestionSourceType { return []IngestionSourceType{ IngestionSourceTypeBigquery, - IngestionSourceTypeConfluence, IngestionSourceTypeDynamics365, IngestionSourceTypeForeignCatalog, IngestionSourceTypeGa4RawData, - IngestionSourceTypeGoogleAds, - IngestionSourceTypeGuidewire, - IngestionSourceTypeHubspot, IngestionSourceTypeManagedPostgresql, - IngestionSourceTypeMetaMarketing, IngestionSourceTypeMysql, IngestionSourceTypeNetsuite, IngestionSourceTypeOracle, IngestionSourceTypePostgresql, - IngestionSourceTypeRedshift, IngestionSourceTypeSalesforce, - IngestionSourceTypeSalesforceMarketingCloud, IngestionSourceTypeServicenow, IngestionSourceTypeSharepoint, - IngestionSourceTypeSqldw, IngestionSourceTypeSqlserver, IngestionSourceTypeTeradata, - IngestionSourceTypeTiktokAds, - IngestionSourceTypeWorkdayHcm, IngestionSourceTypeWorkdayRaas, - IngestionSourceTypeZendesk, } } @@ -1798,6 +1792,50 @@ func (s RestartWindow) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Configuration for rewinding a specific dataset. +type RewindDatasetSpec struct { + // Whether to cascade the rewind to dependent datasets. Must be specified. + Cascade bool `json:"cascade,omitempty"` + // The identifier of the dataset (e.g., "main.foo.tbl1"). + Identifier string `json:"identifier,omitempty"` + // Whether to reset checkpoints for this dataset. + ResetCheckpoints bool `json:"reset_checkpoints,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RewindDatasetSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RewindDatasetSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Information about a rewind being requested for this pipeline or some of the +// datasets in it. +type RewindSpec struct { + // List of datasets to rewind with specific configuration for each. When not + // specified, all datasets will be rewound with cascade = true and + // reset_checkpoints = true. + Datasets []RewindDatasetSpec `json:"datasets,omitempty"` + // If true, this is a dry run and we should emit the RewindSummary but not + // perform the rewind. + DryRun bool `json:"dry_run,omitempty"` + // The base timestamp to rewind to. Must be specified. + RewindTimestamp string `json:"rewind_timestamp,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RewindSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RewindSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Write-only setting, available only in Create/Update calls. Specifies the user // or service principal that the pipeline runs as. If not specified, the // pipeline runs as the user who created the pipeline. @@ -1948,6 +1986,9 @@ type StartUpdate struct { // Refresh on a table means that the states of the table will be reset // before the refresh. RefreshSelection []string `json:"refresh_selection,omitempty"` + // The information about the requested rewind operation. If specified this + // is a rewind mode update. + RewindSpec *RewindSpec `json:"rewind_spec,omitempty"` // If true, this update only validates the correctness of pipeline source // code but does not materialize or publish any datasets. ValidateOnly bool `json:"validate_only,omitempty"` @@ -2088,8 +2129,8 @@ type TableSpecificConfig struct { // The SCD type to use to ingest the table. 
ScdType TableSpecificConfigScdType `json:"scd_type,omitempty"` // The column names specifying the logical order of events in the source - // data. Delta Live Tables uses this sequencing to handle change events that - // arrive out of order. + // data. Spark Declarative Pipelines uses this sequencing to handle change + // events that arrive out of order. SequenceBy []string `json:"sequence_by,omitempty"` // (Optional) Additional custom parameters for Workday Report WorkdayReportParameters *IngestionPipelineDefinitionWorkdayReportParameters `json:"workday_report_parameters,omitempty"` diff --git a/service/pkg.go b/service/pkg.go index 96e82b61c..46fd9087e 100755 --- a/service/pkg.go +++ b/service/pkg.go @@ -206,7 +206,7 @@ // // - [settings.PersonalComputeAPI]: The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources. // -// - [pipelines.PipelinesAPI]: The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. +// - [pipelines.PipelinesAPI]: The Lakeflow Spark Declarative Pipelines API allows you to create, edit, delete, start, and view details about pipelines. // // - [catalog.PoliciesAPI]: Attribute-Based Access Control (ABAC) provides high leverage governance for enforcing compliance policies in Unity Catalog. // @@ -266,7 +266,7 @@ // // - [settings.RestrictWorkspaceAdminsAPI]: The Restrict Workspace Admins setting lets you control the capabilities of workspace admins. // -// - [catalog.RfaAPI]: Request for Access enables customers to request access to and manage access request destinations for Unity Catalog securables. +// - [catalog.RfaAPI]: Request for Access enables users to request access for Unity Catalog securables. // // - [catalog.SchemasAPI]: A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace. 
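The RewindSpec plumbing above is easiest to see from the caller's side. Below is a sketch of a dry-run rewind submitted via StartUpdate; the pipeline ID is hypothetical, and the timestamp format is an assumption (rewind_timestamp is a plain string and this diff does not pin a format).

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	resp, err := w.Pipelines.StartUpdate(ctx, pipelines.StartUpdate{
		PipelineId: "1234-abcd-5678", // hypothetical pipeline ID
		RewindSpec: &pipelines.RewindSpec{
			RewindTimestamp: "2025-01-01T00:00:00Z", // assumed ISO-8601
			DryRun:          true,                   // emit RewindSummary only
			Datasets: []pipelines.RewindDatasetSpec{{
				Identifier:       "main.foo.tbl1",
				Cascade:          true,
				ResetCheckpoints: false,
				// ResetCheckpoints is an omitempty bool; to send an explicit
				// false, name it in ForceSendFields.
				ForceSendFields: []string{"ResetCheckpoints"},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("update:", resp.UpdateId)
}

Clearing DryRun submits the same request as a real rewind; omitting Datasets rewinds every dataset with cascade and checkpoint reset enabled, per the doc comment above.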
// diff --git a/service/settings/model.go b/service/settings/model.go index 6a34a10b8..0f54faa68 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -808,6 +808,10 @@ func (s CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule) MarshalJS type CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState string +const CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateCreateFailed CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState = `CREATE_FAILED` + +const CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateCreating CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState = `CREATING` + const CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateDisconnected CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState = `DISCONNECTED` const CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateEstablished CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState = `ESTABLISHED` @@ -826,11 +830,11 @@ func (f *CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLin // Set raw string value and validate it against allowed values func (f *CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState) Set(v string) error { switch v { - case `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `PENDING`, `REJECTED`: + case `CREATE_FAILED`, `CREATING`, `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `PENDING`, `REJECTED`: *f = CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "EXPIRED", "PENDING", "REJECTED"`, v) + return fmt.Errorf(`value "%s" is not one of "CREATE_FAILED", "CREATING", "DISCONNECTED", "ESTABLISHED", "EXPIRED", "PENDING", "REJECTED"`, v) } } @@ -839,6 +843,8 @@ func (f *CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLin // There is no guarantee on the order of the values in the slice. 
func (f *CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState) Values() []CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState { return []CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState{ + CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateCreateFailed, + CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateCreating, CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateDisconnected, CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateEstablished, CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionStateExpired, @@ -3308,6 +3314,10 @@ func (s NccAzurePrivateEndpointRule) MarshalJSON() ([]byte, error) { type NccAzurePrivateEndpointRuleConnectionState string +const NccAzurePrivateEndpointRuleConnectionStateCreateFailed NccAzurePrivateEndpointRuleConnectionState = `CREATE_FAILED` + +const NccAzurePrivateEndpointRuleConnectionStateCreating NccAzurePrivateEndpointRuleConnectionState = `CREATING` + const NccAzurePrivateEndpointRuleConnectionStateDisconnected NccAzurePrivateEndpointRuleConnectionState = `DISCONNECTED` const NccAzurePrivateEndpointRuleConnectionStateEstablished NccAzurePrivateEndpointRuleConnectionState = `ESTABLISHED` @@ -3328,11 +3338,11 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) String() string { // Set raw string value and validate it against allowed values func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error { switch v { - case `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `INIT`, `PENDING`, `REJECTED`: + case `CREATE_FAILED`, `CREATING`, `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `INIT`, `PENDING`, `REJECTED`: *f = NccAzurePrivateEndpointRuleConnectionState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "EXPIRED", "INIT", "PENDING", "REJECTED"`, v) + return fmt.Errorf(`value "%s" is not one of "CREATE_FAILED", "CREATING", "DISCONNECTED", "ESTABLISHED", "EXPIRED", "INIT", "PENDING", "REJECTED"`, v) } } @@ -3341,6 +3351,8 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error { // There is no guarantee on the order of the values in the slice. 
func (f *NccAzurePrivateEndpointRuleConnectionState) Values() []NccAzurePrivateEndpointRuleConnectionState { return []NccAzurePrivateEndpointRuleConnectionState{ + NccAzurePrivateEndpointRuleConnectionStateCreateFailed, + NccAzurePrivateEndpointRuleConnectionStateCreating, NccAzurePrivateEndpointRuleConnectionStateDisconnected, NccAzurePrivateEndpointRuleConnectionStateEstablished, NccAzurePrivateEndpointRuleConnectionStateExpired, @@ -3484,10 +3496,6 @@ func (s NccPrivateEndpointRule) MarshalJSON() ([]byte, error) { type NccPrivateEndpointRulePrivateLinkConnectionState string -const NccPrivateEndpointRulePrivateLinkConnectionStateCreateFailed NccPrivateEndpointRulePrivateLinkConnectionState = `CREATE_FAILED` - -const NccPrivateEndpointRulePrivateLinkConnectionStateCreating NccPrivateEndpointRulePrivateLinkConnectionState = `CREATING` - const NccPrivateEndpointRulePrivateLinkConnectionStateDisconnected NccPrivateEndpointRulePrivateLinkConnectionState = `DISCONNECTED` const NccPrivateEndpointRulePrivateLinkConnectionStateEstablished NccPrivateEndpointRulePrivateLinkConnectionState = `ESTABLISHED` @@ -3506,11 +3514,11 @@ func (f *NccPrivateEndpointRulePrivateLinkConnectionState) String() string { // Set raw string value and validate it against allowed values func (f *NccPrivateEndpointRulePrivateLinkConnectionState) Set(v string) error { switch v { - case `CREATE_FAILED`, `CREATING`, `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `PENDING`, `REJECTED`: + case `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `PENDING`, `REJECTED`: *f = NccPrivateEndpointRulePrivateLinkConnectionState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CREATE_FAILED", "CREATING", "DISCONNECTED", "ESTABLISHED", "EXPIRED", "PENDING", "REJECTED"`, v) + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "EXPIRED", "PENDING", "REJECTED"`, v) } } @@ -3519,8 +3527,6 @@ func (f *NccPrivateEndpointRulePrivateLinkConnectionState) Set(v string) error { // There is no guarantee on the order of the values in the slice. func (f *NccPrivateEndpointRulePrivateLinkConnectionState) Values() []NccPrivateEndpointRulePrivateLinkConnectionState { return []NccPrivateEndpointRulePrivateLinkConnectionState{ - NccPrivateEndpointRulePrivateLinkConnectionStateCreateFailed, - NccPrivateEndpointRulePrivateLinkConnectionStateCreating, NccPrivateEndpointRulePrivateLinkConnectionStateDisconnected, NccPrivateEndpointRulePrivateLinkConnectionStateEstablished, NccPrivateEndpointRulePrivateLinkConnectionStateExpired, diff --git a/service/settingsv2/api.go b/service/settingsv2/api.go index 077c2875a..be6434737 100755 --- a/service/settingsv2/api.go +++ b/service/settingsv2/api.go @@ -33,7 +33,9 @@ type AccountSettingsV2Interface interface { // Patch a setting value at account level. See // :method:settingsv2/listaccountsettingsmetadata for list of setting available - // via public APIs at account level. + // via public APIs at account level. To determine the correct field to include + // in a patch request, refer to the type field of the setting returned in the + // :method:settingsv2/listaccountsettingsmetadata response. PatchPublicAccountSetting(ctx context.Context, request PatchPublicAccountSettingRequest) (*Setting, error) } @@ -73,7 +75,9 @@ type WorkspaceSettingsV2Interface interface { // Patch a setting value at workspace level. See // :method:settingsv2/listworkspacesettingsmetadata for list of setting - // available via public APIs at workspace level. + // available via public APIs at workspace level. 
To determine the correct field + to include in a patch request, refer to the type field of the setting + returned in the :method:settingsv2/listworkspacesettingsmetadata response. PatchPublicWorkspaceSetting(ctx context.Context, request PatchPublicWorkspaceSettingRequest) (*Setting, error) } diff --git a/service/settingsv2/interface.go b/service/settingsv2/interface.go index 96e8ac399..7aebd9a53 100755 --- a/service/settingsv2/interface.go +++ b/service/settingsv2/interface.go @@ -23,7 +23,10 @@ type AccountSettingsV2Service interface { // Patch a setting value at account level. See // :method:settingsv2/listaccountsettingsmetadata for list of setting - // available via public APIs at account level. + // available via public APIs at account level. To determine the correct + // field to include in a patch request, refer to the type field of the + // setting returned in the :method:settingsv2/listaccountsettingsmetadata + // response. PatchPublicAccountSetting(ctx context.Context, request PatchPublicAccountSettingRequest) (*Setting, error) } @@ -44,6 +47,9 @@ type WorkspaceSettingsV2Service interface { // Patch a setting value at workspace level. See // :method:settingsv2/listworkspacesettingsmetadata for list of setting - // available via public APIs at workspace level. + // available via public APIs at workspace level. To determine the correct + // field to include in a patch request, refer to the type field of the + // setting returned in the :method:settingsv2/listworkspacesettingsmetadata + // response. PatchPublicWorkspaceSetting(ctx context.Context, request PatchPublicWorkspaceSettingRequest) (*Setting, error) } diff --git a/service/settingsv2/model.go b/service/settingsv2/model.go index 411be751b..241b62317 100755 --- a/service/settingsv2/model.go +++ b/service/settingsv2/model.go @@ -255,6 +255,7 @@ type GetPublicAccountSettingRequest struct { } type GetPublicWorkspaceSettingRequest struct { + // Name of the setting. Name string `json:"-" url:"-"` } @@ -365,6 +366,7 @@ type PatchPublicAccountSettingRequest struct { } type PatchPublicWorkspaceSettingRequest struct { + // Name of the setting. Name string `json:"-" url:"-"` Setting Setting `json:"setting"` @@ -459,38 +461,64 @@ func (f *RestrictWorkspaceAdminsMessageStatus) Type() string { } type Setting struct { + // Setting value for aibi_dashboard_embedding_access_policy setting. This is + // the setting value set by consumers; check + // effective_aibi_dashboard_embedding_access_policy for the final setting value. AibiDashboardEmbeddingAccessPolicy *AibiDashboardEmbeddingAccessPolicy `json:"aibi_dashboard_embedding_access_policy,omitempty"` - + // Setting value for aibi_dashboard_embedding_approved_domains setting. This + // is the setting value set by consumers; check + // effective_aibi_dashboard_embedding_approved_domains for the final setting + // value. AibiDashboardEmbeddingApprovedDomains *AibiDashboardEmbeddingApprovedDomains `json:"aibi_dashboard_embedding_approved_domains,omitempty"` - + // Setting value for automatic_cluster_update_workspace setting. This is the + // setting value set by consumers; check + // effective_automatic_cluster_update_workspace for the final setting value. AutomaticClusterUpdateWorkspace *ClusterAutoRestartMessage `json:"automatic_cluster_update_workspace,omitempty"` - + // Setting value for boolean type setting. This is the setting value set by + // consumers; check effective_boolean_val for the final setting value. BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` - + // Effective setting value for aibi_dashboard_embedding_access_policy + // setting. This is the final effective value of the setting. To set a value, use + // aibi_dashboard_embedding_access_policy. EffectiveAibiDashboardEmbeddingAccessPolicy *AibiDashboardEmbeddingAccessPolicy `json:"effective_aibi_dashboard_embedding_access_policy,omitempty"` - + // Effective setting value for aibi_dashboard_embedding_approved_domains + // setting. This is the final effective value of the setting. To set a value, use + // aibi_dashboard_embedding_approved_domains. EffectiveAibiDashboardEmbeddingApprovedDomains *AibiDashboardEmbeddingApprovedDomains `json:"effective_aibi_dashboard_embedding_approved_domains,omitempty"` - + // Effective setting value for automatic_cluster_update_workspace setting. + // This is the final effective value of the setting. To set a value, use + // automatic_cluster_update_workspace. EffectiveAutomaticClusterUpdateWorkspace *ClusterAutoRestartMessage `json:"effective_automatic_cluster_update_workspace,omitempty"` - + // Effective setting value for boolean type setting. This is the final + // effective value of the setting. To set a value, use boolean_val. EffectiveBooleanVal *BooleanMessage `json:"effective_boolean_val,omitempty"` - + // Effective setting value for integer type setting. This is the final + // effective value of the setting. To set a value, use integer_val. EffectiveIntegerVal *IntegerMessage `json:"effective_integer_val,omitempty"` - + // Effective setting value for personal_compute setting. This is the final + // effective value of the setting. To set a value, use personal_compute. EffectivePersonalCompute *PersonalComputeMessage `json:"effective_personal_compute,omitempty"` - + // Effective setting value for restrict_workspace_admins setting. This is + // the final effective value of the setting. To set a value, use + // restrict_workspace_admins. EffectiveRestrictWorkspaceAdmins *RestrictWorkspaceAdminsMessage `json:"effective_restrict_workspace_admins,omitempty"` - + // Effective setting value for string type setting. This is the final + // effective value of the setting. To set a value, use string_val. EffectiveStringVal *StringMessage `json:"effective_string_val,omitempty"` - + // Setting value for integer type setting. This is the setting value set by + // consumers; check effective_integer_val for the final setting value. IntegerVal *IntegerMessage `json:"integer_val,omitempty"` // Name of the setting. Name string `json:"name,omitempty"` - + // Setting value for personal_compute setting. This is the setting value set + // by consumers; check effective_personal_compute for the final setting value. PersonalCompute *PersonalComputeMessage `json:"personal_compute,omitempty"` - + // Setting value for restrict_workspace_admins setting. This is the setting + // value set by consumers; check effective_restrict_workspace_admins for + // the final setting value. RestrictWorkspaceAdmins *RestrictWorkspaceAdminsMessage `json:"restrict_workspace_admins,omitempty"` - + // Setting value for string type setting. This is the setting value set by + // consumers; check effective_string_val for the final setting value. StringVal *StringMessage `json:"string_val,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -511,8 +539,8 @@ type SettingsMetadata struct { DocsLink string `json:"docs_link,omitempty"` // Name of the setting. Name string `json:"name,omitempty"` - // Type of the setting. To set this setting, the value sent must match this - // type.
+ // Sample message depicting the type of the setting. To set this setting, + // the value sent must match this type. Type string `json:"type,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/sql/model.go b/service/sql/model.go index 798890445..b568ad39e 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -611,6 +611,7 @@ type AlertV2Operand struct { } type AlertV2OperandColumn struct { + // If not set, the behavior is equivalent to using `First row` in the UI. Aggregation Aggregation `json:"aggregation,omitempty"` Display string `json:"display,omitempty"` @@ -5239,8 +5240,6 @@ const TerminationReasonCodeDockerImageTooLargeForInstanceException TerminationRe const TerminationReasonCodeDockerInvalidOsException TerminationReasonCode = `DOCKER_INVALID_OS_EXCEPTION` -const TerminationReasonCodeDriverDnsResolutionFailure TerminationReasonCode = `DRIVER_DNS_RESOLUTION_FAILURE` - const TerminationReasonCodeDriverEviction TerminationReasonCode = `DRIVER_EVICTION` const TerminationReasonCodeDriverLaunchTimeout TerminationReasonCode = `DRIVER_LAUNCH_TIMEOUT` @@ -5389,10 +5388,6 @@ const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = ` const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` -const TerminationReasonCodeNoActivatedK8s TerminationReasonCode = `NO_ACTIVATED_K8S` - -const TerminationReasonCodeNoActivatedK8sTestingTag TerminationReasonCode = `NO_ACTIVATED_K8S_TESTING_TAG` - const TerminationReasonCodeNoMatchedK8s TerminationReasonCode = `NO_MATCHED_K8S` const TerminationReasonCodeNoMatchedK8sTestingTag TerminationReasonCode = `NO_MATCHED_K8S_TESTING_TAG` @@ -5417,8 +5412,6 @@ const TerminationReasonCodeSecretPermissionDenied TerminationReasonCode = `SECRE const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` -const TerminationReasonCodeSecurityAgentsFailedInitialVerification TerminationReasonCode = `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION` - const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` @@ -5491,11 +5484,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, 
`AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DBR_IMAGE_RESOLUTION_FAILURE`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_DNS_RESOLUTION_FAILURE`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNHEALTHY`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_ACTIVATED_K8S`, `NO_ACTIVATED_K8S_TESTING_TAG`, `NO_MATCHED_K8S`, 
`NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DBR_IMAGE_RESOLUTION_FAILURE`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, 
`DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNHEALTHY`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of 
"ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DBR_IMAGE_RESOLUTION_FAILURE", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_DNS_RESOLUTION_FAILURE", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNHEALTHY", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", 
"INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_ACTIVE_POD_QUOTA_EXCEEDED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_ACTIVATED_K8S", "NO_ACTIVATED_K8S_TESTING_TAG", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USAGE_POLICY_ENTITLEMENT_DENIED", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", 
"AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DBR_IMAGE_RESOLUTION_FAILURE", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNHEALTHY", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_ACTIVE_POD_QUOTA_EXCEEDED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", "NETWORK_CHECK_CONTROL_PLANE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_NIC_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE_DUE_TO_MISCONFIG", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", 
"POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USAGE_POLICY_ENTITLEMENT_DENIED", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } @@ -5568,7 +5561,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeDockerImagePullFailure, TerminationReasonCodeDockerImageTooLargeForInstanceException, TerminationReasonCodeDockerInvalidOsException, - TerminationReasonCodeDriverDnsResolutionFailure, TerminationReasonCodeDriverEviction, TerminationReasonCodeDriverLaunchTimeout, TerminationReasonCodeDriverNodeUnreachable, @@ -5643,8 +5635,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeNetworkCheckStorageFailureDueToMisconfig, TerminationReasonCodeNetworkConfigurationFailure, TerminationReasonCodeNfsMountFailure, - TerminationReasonCodeNoActivatedK8s, - TerminationReasonCodeNoActivatedK8sTestingTag, TerminationReasonCodeNoMatchedK8s, TerminationReasonCodeNoMatchedK8sTestingTag, TerminationReasonCodeNpipTunnelSetupFailure, @@ -5657,7 +5647,6 @@ func (f *TerminationReasonCode) Values() []TerminationReasonCode { TerminationReasonCodeSecretCreationFailure, TerminationReasonCodeSecretPermissionDenied, TerminationReasonCodeSecretResolutionError, - TerminationReasonCodeSecurityAgentsFailedInitialVerification, TerminationReasonCodeSecurityDaemonRegistrationException, TerminationReasonCodeSelfBootstrapFailure, TerminationReasonCodeServerlessLongRunningTerminated, diff --git a/service/vectorsearch/api.go b/service/vectorsearch/api.go index 505de8214..837c32589 100755 --- a/service/vectorsearch/api.go +++ b/service/vectorsearch/api.go @@ -53,6 +53,9 @@ type VectorSearchEndpointsInterface interface { // This method is generated by Databricks SDK Code Generator. 
ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) + // Retrieve user-visible metrics for an endpoint + RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) + // Update the budget policy of an endpoint UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index dfbc05884..78848be27 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -88,6 +88,17 @@ func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, r return &listEndpointResponse, err } +func (a *vectorSearchEndpointsImpl) RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) { + var retrieveUserVisibleMetricsResponse RetrieveUserVisibleMetricsResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/metrics", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &retrieveUserVisibleMetricsResponse) + return &retrieveUserVisibleMetricsResponse, err +} + func (a *vectorSearchEndpointsImpl) UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) { var patchEndpointBudgetPolicyResponse PatchEndpointBudgetPolicyResponse path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/budget-policy", request.EndpointName) diff --git a/service/vectorsearch/interface.go b/service/vectorsearch/interface.go index a5d4569da..3074b650c 100755 --- a/service/vectorsearch/interface.go +++ b/service/vectorsearch/interface.go @@ -23,6 +23,9 @@ type VectorSearchEndpointsService interface { // List all vector search endpoints in the workspace. ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) + // Retrieve user-visible metrics for an endpoint + RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) + // Update the budget policy of an endpoint UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 2e8076edd..c21e80d47 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -11,6 +11,8 @@ import ( type ColumnInfo struct { // Name of the column. 
Name string `json:"name,omitempty"` + // Data type of the column (e.g., "string", "int", "array") + TypeText string `json:"type_text,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -348,6 +350,10 @@ const EndpointStatusStateOnline EndpointStatusState = `ONLINE` const EndpointStatusStateProvisioning EndpointStatusState = `PROVISIONING` +const EndpointStatusStateRedState EndpointStatusState = `RED_STATE` + +const EndpointStatusStateYellowState EndpointStatusState = `YELLOW_STATE` + // String representation for [fmt.Print] func (f *EndpointStatusState) String() string { return string(*f) @@ -356,11 +362,11 @@ func (f *EndpointStatusState) String() string { // Set raw string value and validate it against allowed values func (f *EndpointStatusState) Set(v string) error { switch v { - case `OFFLINE`, `ONLINE`, `PROVISIONING`: + case `OFFLINE`, `ONLINE`, `PROVISIONING`, `RED_STATE`, `YELLOW_STATE`: *f = EndpointStatusState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING"`, v) + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING", "RED_STATE", "YELLOW_STATE"`, v) } } @@ -372,6 +378,8 @@ func (f *EndpointStatusState) Values() []EndpointStatusState { EndpointStatusStateOffline, EndpointStatusStateOnline, EndpointStatusStateProvisioning, + EndpointStatusStateRedState, + EndpointStatusStateYellowState, } } @@ -532,6 +540,70 @@ func (s MapStringValueEntry) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Metric specification +type Metric struct { + // Metric labels + Labels []MetricLabel `json:"labels,omitempty"` + // Metric name + Name string `json:"name,omitempty"` + // Percentile for the metric + Percentile float64 `json:"percentile,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Metric) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Metric) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Label for a metric +type MetricLabel struct { + // Label name + Name string `json:"name,omitempty"` + // Label value + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *MetricLabel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetricLabel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Single metric value at a specific timestamp +type MetricValue struct { + // Timestamp of the metric value (milliseconds since epoch) + Timestamp int64 `json:"timestamp,omitempty"` + // Metric value + Value float64 `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *MetricValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetricValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Collection of metric values for a specific metric +type MetricValues struct { + // Metric specification + Metric *Metric `json:"metric,omitempty"` + // Time series of metric values + Values []MetricValue `json:"values,omitempty"` +} + type MiniVectorIndex struct { // The user who created the index. Creator string `json:"creator,omitempty"` @@ -668,7 +740,8 @@ type QueryVectorIndexRequest struct { NumResults int `json:"num_results,omitempty"` // Query text. Required for Delta Sync Index using model endpoint. QueryText string `json:"query_text,omitempty"` - // The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. + // The query type to use. 
Choices are `ANN`, `HYBRID`, and `FULL_TEXT`. + // Defaults to `ANN`. QueryType string `json:"query_type,omitempty"` // Query vector. Required for Direct Vector Access Index and Delta Sync // Index using self-managed vectors. @@ -767,6 +840,51 @@ func (s ResultManifest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Request to retrieve user-visible metrics +type RetrieveUserVisibleMetricsRequest struct { + // End time for metrics query + EndTime string `json:"end_time,omitempty"` + // Granularity in seconds + GranularityInSeconds int `json:"granularity_in_seconds,omitempty"` + // List of metrics to retrieve + Metrics []Metric `json:"metrics,omitempty"` + // Vector search endpoint name + Name string `json:"-" url:"-"` + // Token for pagination + PageToken string `json:"page_token,omitempty"` + // Start time for metrics query + StartTime string `json:"start_time,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RetrieveUserVisibleMetricsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RetrieveUserVisibleMetricsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Response containing user-visible metrics +type RetrieveUserVisibleMetricsResponse struct { + // Collection of metric values + MetricValues []MetricValues `json:"metric_values,omitempty"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RetrieveUserVisibleMetricsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RetrieveUserVisibleMetricsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ScanVectorIndexRequest struct { // Name of the vector index to scan. IndexName string `json:"-" url:"-"` diff --git a/service/workspace/model.go b/service/workspace/model.go index e31212d0a..6cddaa861 100755 --- a/service/workspace/model.go +++ b/service/workspace/model.go @@ -523,7 +523,9 @@ type GetStatusRequest struct { type GetWorkspaceObjectPermissionLevelsRequest struct { // The workspace object for which to get or manage permissions. WorkspaceObjectId string `json:"-" url:"-"` - // The workspace object type for which to get or manage permissions. + // The workspace object type for which to get or manage permissions. Could + // be one of the following: alerts, alertsv2, dashboards, dbsql-dashboards, + // directories, experiments, files, genie, notebooks, queries WorkspaceObjectType string `json:"-" url:"-"` } @@ -535,7 +537,9 @@ type GetWorkspaceObjectPermissionLevelsResponse struct { type GetWorkspaceObjectPermissionsRequest struct { // The workspace object for which to get or manage permissions. WorkspaceObjectId string `json:"-" url:"-"` - // The workspace object type for which to get or manage permissions. + // The workspace object type for which to get or manage permissions. Could + // be one of the following: alerts, alertsv2, dashboards, dbsql-dashboards, + // directories, experiments, files, genie, notebooks, queries WorkspaceObjectType string `json:"-" url:"-"` } @@ -1378,6 +1382,8 @@ type WorkspaceObjectPermissionsRequest struct { AccessControlList []WorkspaceObjectAccessControlRequest `json:"access_control_list,omitempty"` // The workspace object for which to get or manage permissions. 
WorkspaceObjectId string `json:"-" url:"-"` - // The workspace object type for which to get or manage permissions. + // The workspace object type for which to get or manage permissions. Could + // be one of the following: alerts, alertsv2, dashboards, dbsql-dashboards, + // directories, experiments, files, genie, notebooks, queries WorkspaceObjectType string `json:"-" url:"-"` } diff --git a/workspace_client.go b/workspace_client.go index cf1c2a83f..122055859 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -506,7 +506,7 @@ type WorkspaceClient struct { // management, monitoring, and error reporting for all of your jobs. You can // run your jobs immediately or periodically through an easy-to-use // scheduling system. You can implement job tasks using notebooks, JARS, - // Delta Live Tables pipelines, or Python, Scala, Spark submit, and Java + // Spark Declarative Pipelines, or Python, Scala, Spark submit, and Java // applications. // // You should never hard code secrets or store them in plain text. Use the @@ -638,20 +638,21 @@ type WorkspaceClient struct { // [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html Permissions iam.PermissionsInterface - // The Delta Live Tables API allows you to create, edit, delete, start, and - // view details about pipelines. + // The Lakeflow Spark Declarative Pipelines API allows you to create, edit, + // delete, start, and view details about pipelines. // - // Delta Live Tables is a framework for building reliable, maintainable, and - // testable data processing pipelines. You define the transformations to - // perform on your data, and Delta Live Tables manages task orchestration, - // cluster management, monitoring, data quality, and error handling. + // Spark Declarative Pipelines is a framework for building reliable, + // maintainable, and testable data processing pipelines. You define the + // transformations to perform on your data, and Spark Declarative Pipelines + // manages task orchestration, cluster management, monitoring, data quality, + // and error handling. // // Instead of defining your data pipelines using a series of separate Apache - // Spark tasks, Delta Live Tables manages how your data is transformed based - // on a target schema you define for each processing step. You can also - // enforce data quality with Delta Live Tables expectations. Expectations - // allow you to define expected data quality and specify how to handle - // records that fail those expectations. + // Spark tasks, Spark Declarative Pipelines manages how your data is + // transformed based on a target schema you define for each processing step. + // You can also enforce data quality with Spark Declarative Pipelines + // expectations. Expectations allow you to define expected data quality and + // specify how to handle records that fail those expectations. Pipelines pipelines.PipelinesInterface // Attribute-Based Access Control (ABAC) provides high leverage governance @@ -902,13 +903,11 @@ type WorkspaceClient struct { // [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas ResourceQuotas catalog.ResourceQuotasInterface - // Request for Access enables customers to request access to and manage - // access request destinations for Unity Catalog securables. + // Request for Access enables users to request access for Unity Catalog + // securables. 
// - // These APIs provide a standardized way to update, get, and request to - // access request destinations. Fine-grained authorization ensures that only - // users with appropriate permissions can manage access request - // destinations. + // These APIs provide a standardized way for securable owners (or users with + // MANAGE privileges) to manage access request destinations. Rfa catalog.RfaInterface // A schema (also called a database) is the second layer of Unity
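Taken together, the vectorsearch changes above add a pageable metrics API (`POST /api/2.0/vector-search/endpoints/{name}/metrics`) backed by the new `Metric`, `MetricLabel`, `MetricValue`, and `MetricValues` types. The following is a minimal sketch of how the generated client might be called; the endpoint name, the metric name, and the RFC 3339 timestamp format for `StartTime`/`EndTime` are illustrative assumptions, since this diff does not document them.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	req := vectorsearch.RetrieveUserVisibleMetricsRequest{
		Name:                 "my-endpoint",          // hypothetical endpoint name
		StartTime:            "2024-01-01T00:00:00Z", // assumed RFC 3339; format not documented in this diff
		EndTime:              "2024-01-02T00:00:00Z",
		GranularityInSeconds: 300,
		Metrics: []vectorsearch.Metric{
			{Name: "qps"}, // hypothetical metric name
		},
	}

	// The response is paginated via next_page_token, so loop until the
	// token comes back empty.
	for {
		resp, err := w.VectorSearchEndpoints.RetrieveUserVisibleMetrics(ctx, req)
		if err != nil {
			log.Fatal(err)
		}
		for _, mv := range resp.MetricValues {
			if mv.Metric == nil {
				continue
			}
			for _, v := range mv.Values {
				// Timestamp is milliseconds since epoch per the field comment.
				fmt.Printf("%s ts=%d value=%f\n", mv.Metric.Name, v.Timestamp, v.Value)
			}
		}
		if resp.NextPageToken == "" {
			break
		}
		req.PageToken = resp.NextPageToken
	}
}
```

Note also that callers that switch exhaustively on `vectorsearch.EndpointStatusState` should now account for `EndpointStatusStateRedState` and `EndpointStatusStateYellowState`, which `Set` and `Values` accept as of this change.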