diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 19009050a..df14dfaaa 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -59c4c0f3d5f0ef00cd5350b5674e941a7606d91a \ No newline at end of file +file:/home/tejas.kochar/universe/bazel-bin/openapi/all-internal.json \ No newline at end of file diff --git a/experimental/mocks/service/ml/mock_feature_engineering_interface.go b/experimental/mocks/service/ml/mock_feature_engineering_interface.go index 478f94fff..af59cd3df 100644 --- a/experimental/mocks/service/ml/mock_feature_engineering_interface.go +++ b/experimental/mocks/service/ml/mock_feature_engineering_interface.go @@ -24,6 +24,65 @@ func (_m *MockFeatureEngineeringInterface) EXPECT() *MockFeatureEngineeringInter return &MockFeatureEngineeringInterface_Expecter{mock: &_m.Mock} } +// BatchCreateMaterializedFeatures provides a mock function with given fields: ctx, request +func (_m *MockFeatureEngineeringInterface) BatchCreateMaterializedFeatures(ctx context.Context, request ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for BatchCreateMaterializedFeatures") + } + + var r0 *ml.BatchCreateMaterializedFeaturesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) *ml.BatchCreateMaterializedFeaturesResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.BatchCreateMaterializedFeaturesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.BatchCreateMaterializedFeaturesRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BatchCreateMaterializedFeatures' +type MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call struct { + *mock.Call +} + +// BatchCreateMaterializedFeatures is a helper method to define mock.On call +// - ctx context.Context +// - request ml.BatchCreateMaterializedFeaturesRequest +func (_e *MockFeatureEngineeringInterface_Expecter) BatchCreateMaterializedFeatures(ctx interface{}, request interface{}) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + return &MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call{Call: _e.mock.On("BatchCreateMaterializedFeatures", ctx, request)} +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) Run(run func(ctx context.Context, request ml.BatchCreateMaterializedFeaturesRequest)) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.BatchCreateMaterializedFeaturesRequest)) + }) + return _c +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) Return(_a0 *ml.BatchCreateMaterializedFeaturesResponse, _a1 error) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call) RunAndReturn(run func(context.Context, 
ml.BatchCreateMaterializedFeaturesRequest) (*ml.BatchCreateMaterializedFeaturesResponse, error)) *MockFeatureEngineeringInterface_BatchCreateMaterializedFeatures_Call { + _c.Call.Return(run) + return _c +} + // CreateFeature provides a mock function with given fields: ctx, request func (_m *MockFeatureEngineeringInterface) CreateFeature(ctx context.Context, request ml.CreateFeatureRequest) (*ml.Feature, error) { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go index 9cc2b4d0f..d2197cfe4 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go @@ -481,6 +481,65 @@ func (_c *MockVectorSearchEndpointsInterface_ListEndpointsAll_Call) RunAndReturn return _c } +// RetrieveUserVisibleMetrics provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) RetrieveUserVisibleMetrics(ctx context.Context, request vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for RetrieveUserVisibleMetrics") + } + + var r0 *vectorsearch.RetrieveUserVisibleMetricsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) *vectorsearch.RetrieveUserVisibleMetricsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.RetrieveUserVisibleMetricsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RetrieveUserVisibleMetrics' +type MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call struct { + *mock.Call +} + +// RetrieveUserVisibleMetrics is a helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.RetrieveUserVisibleMetricsRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) RetrieveUserVisibleMetrics(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + return &MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call{Call: _e.mock.On("RetrieveUserVisibleMetrics", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) Run(run func(ctx context.Context, request vectorsearch.RetrieveUserVisibleMetricsRequest)) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.RetrieveUserVisibleMetricsRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) Return(_a0 *vectorsearch.RetrieveUserVisibleMetricsResponse, _a1 error) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Return(_a0, _a1) + 
return _c +} + +func (_c *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call) RunAndReturn(run func(context.Context, vectorsearch.RetrieveUserVisibleMetricsRequest) (*vectorsearch.RetrieveUserVisibleMetricsResponse, error)) *MockVectorSearchEndpointsInterface_RetrieveUserVisibleMetrics_Call { + _c.Call.Return(run) + return _c +} + // UpdateEndpointBudgetPolicy provides a mock function with given fields: ctx, request func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointBudgetPolicy(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error) { ret := _m.Called(ctx, request) diff --git a/internal/generatedtests/json_marshall_test.go b/internal/generatedtests/json_marshall_test.go index 996934e0e..eb0c39618 100755 --- a/internal/generatedtests/json_marshall_test.go +++ b/internal/generatedtests/json_marshall_test.go @@ -204,7 +204,7 @@ func TestJsonMarshall(t *testing.T) { RequiredString: "non_default_string", RequiredStruct: map[string]json.RawMessage{}, RequiredTimestamp: *time.New(timeFromString("2023-12-31T23:59:59Z")), - RequiredValue: json.RawMessage("{}"), + RequiredValue: json.RawMessage("{\"key\": \"value\"}"), TestRequiredEnum: jsonmarshallv2.TestEnumTestEnumTwo, }, want: `{ @@ -212,7 +212,7 @@ func TestJsonMarshall(t *testing.T) { "required_int32": 42, "required_int64": 1234567890123456789, "required_bool": true, - "required_value": {}, + "required_value": {"key": "value"}, "required_list_value": [], "required_struct": {}, "required_message": {}, diff --git a/internal/testspecs/service/lrotesting/model.go b/internal/testspecs/service/lrotesting/model.go index 805c57808..34ef4fb41 100755 --- a/internal/testspecs/service/lrotesting/model.go +++ b/internal/testspecs/service/lrotesting/model.go @@ -19,12 +19,7 @@ type CreateTestResourceRequest struct { Resource TestResource `json:"resource"` } -// Serialization format for DatabricksServiceException with error details. This -// message doesn't work for ScalaPB-04 as google.protobuf.Any is only available -// to ScalaPB-09. Note the definition of this message should be in sync with -// DatabricksServiceExceptionProto defined in -// /api-base/proto/legacy/databricks.proto except the later one doesn't have the -// error details field defined. +// Databricks Error that is returned by all Databricks APIs. type DatabricksServiceExceptionWithDetailsProto struct { // @pbjson-skip Details []json.RawMessage `json:"details,omitempty"` @@ -350,24 +345,13 @@ type Operation struct { Error *DatabricksServiceExceptionWithDetailsProto `json:"error,omitempty"` // Service-specific metadata associated with the operation. It typically // contains progress information and common metadata such as create time. - // Some services might not provide such metadata. Any method that returns a - // long-running operation should document the metadata type, if any. + // Some services might not provide such metadata. Metadata json.RawMessage `json:"metadata,omitempty"` // The server-assigned name, which is only unique within the same service // that originally returns it. If you use the default HTTP mapping, the // `name` should be a resource name ending with `operations/{unique_id}`. - // - // Note: multi-segment resource names are not yet supported in the RPC - // framework and SDK/TF. Until that support is added, `name` must be string - // without internal `/` separators. Name string `json:"name,omitempty"` - // The normal, successful response of the operation. 
If the original method - // returns no data on success, such as `Delete`, the response is - // `google.protobuf.Empty`. If the original method is standard - // `Get`/`Create`/`Update`, the response should be the resource. For other - // methods, the response should have the type `XxxResponse`, where `Xxx` is - // the original method name. For example, if the original method name is - // `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. + // The normal, successful response of the operation. Response json.RawMessage `json:"response,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 6584eeee1..9a46b7263 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -340,6 +340,8 @@ func (a *catalogsImpl) Get(ctx context.Context, request GetCatalogRequest) (*Cat // indication that the end of results has been reached. func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -447,6 +449,8 @@ func (a *connectionsImpl) Get(ctx context.Context, request GetConnectionRequest) // indication that the end of results has been reached. func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -867,6 +871,8 @@ func (a *externalLocationsImpl) Get(ctx context.Context, request GetExternalLoca // indication that the end of results has been reached. func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1078,6 +1084,8 @@ func (a *functionsImpl) Get(ctx context.Context, request GetFunctionRequest) (*F // indication that the end of results has been reached. func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1245,6 +1253,8 @@ func (a *metastoresImpl) Get(ctx context.Context, request GetMetastoreRequest) ( // indication that the end of results has been reached. 
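
Note on the `request.ForceSendFields = append(request.ForceSendFields, "MaxResults")` hunks above and below: with Go zero values, an unset `MaxResults` is indistinguishable from an explicit 0, and zero-valued optional fields are normally dropped during serialization. Force-sending guarantees the page size is transmitted on every page request. Below is a minimal sketch of the mechanism using the SDK's `marshal` package; `listRequest` is a hypothetical stand-in for a generated request type (the real list requests carry `MaxResults` as a query parameter — JSON is used here only to make the zero-value behavior visible):

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/marshal"
)

// listRequest is a hypothetical stand-in for a generated request type:
// an optional int plus the ForceSendFields escape hatch.
type listRequest struct {
	MaxResults int `json:"max_results,omitempty"`

	ForceSendFields []string `json:"-"`
}

func main() {
	req := listRequest{}

	b, _ := marshal.Marshal(req)
	fmt.Println(string(b)) // {} — the zero value is omitted

	req.ForceSendFields = append(req.ForceSendFields, "MaxResults")
	b, _ = marshal.Marshal(req)
	fmt.Println(string(b)) // {"max_results":0} — sent even though it is zero
}
```
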
func (a *metastoresImpl) List(ctx context.Context, request ListMetastoresRequest) listing.Iterator[MetastoreInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListMetastoresRequest) (*ListMetastoresResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -1986,6 +1996,8 @@ func (a *schemasImpl) Get(ctx context.Context, request GetSchemaRequest) (*Schem // indication that the end of results has been reached. func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2097,6 +2109,8 @@ func (a *storageCredentialsImpl) Get(ctx context.Context, request GetStorageCred // indication that the end of results has been reached. func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2205,6 +2219,8 @@ func (a *systemSchemasImpl) Enable(ctx context.Context, request EnableRequest) e // indication that the end of results has been reached. func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2338,6 +2354,8 @@ func (a *tablesImpl) Get(ctx context.Context, request GetTableRequest) (*TableIn // indication that the end of results has been reached. func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalList(ctx, req) @@ -2645,6 +2663,8 @@ func (a *workspaceBindingsImpl) Get(ctx context.Context, request GetWorkspaceBin // indication that the end of results has been reached. 
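
The same force-send change repeats below for workspace bindings, and above for metastores, schemas, storage credentials, system schemas, and tables. Caller code is unaffected; listing still goes through the generated iterator. A usage sketch, assuming a workspace client configured from the environment ("main" is a placeholder catalog name):

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// MaxResults is left at its zero value; the List implementation now
	// force-sends it, so each page request carries an explicit max_results.
	it := w.Schemas.List(ctx, catalog.ListSchemasRequest{CatalogName: "main"})
	for it.HasNext(ctx) {
		schema, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(schema.FullName)
	}
}
```
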
func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + getNextPage := func(ctx context.Context, req GetBindingsRequest) (*GetWorkspaceBindingsResponse, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalGetBindings(ctx, req) diff --git a/service/compute/model.go b/service/compute/model.go index f93551947..cd738b421 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2770,6 +2770,8 @@ const EventDetailsCauseAutorecovery EventDetailsCause = `AUTORECOVERY` const EventDetailsCauseAutoscale EventDetailsCause = `AUTOSCALE` +const EventDetailsCauseAutoscaleV2 EventDetailsCause = `AUTOSCALE_V2` + const EventDetailsCauseReplaceBadNodes EventDetailsCause = `REPLACE_BAD_NODES` const EventDetailsCauseUserRequest EventDetailsCause = `USER_REQUEST` @@ -2782,11 +2784,11 @@ func (f *EventDetailsCause) String() string { // Set raw string value and validate it against allowed values func (f *EventDetailsCause) Set(v string) error { switch v { - case `AUTORECOVERY`, `AUTOSCALE`, `REPLACE_BAD_NODES`, `USER_REQUEST`: + case `AUTORECOVERY`, `AUTOSCALE`, `AUTOSCALE_V2`, `REPLACE_BAD_NODES`, `USER_REQUEST`: *f = EventDetailsCause(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) + return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "AUTOSCALE_V2", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) } } @@ -2797,6 +2799,7 @@ func (f *EventDetailsCause) Values() []EventDetailsCause { return []EventDetailsCause{ EventDetailsCauseAutorecovery, EventDetailsCauseAutoscale, + EventDetailsCauseAutoscaleV2, EventDetailsCauseReplaceBadNodes, EventDetailsCauseUserRequest, } diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 10f1b0605..79a0379e8 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -1028,6 +1028,8 @@ const MessageErrorTypeUnexpectedReplyProcessException MessageErrorType = `UNEXPE const MessageErrorTypeUnknownAiModel MessageErrorType = `UNKNOWN_AI_MODEL` +const MessageErrorTypeUnsupportedConversationTypeException MessageErrorType = `UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION` + const MessageErrorTypeWarehouseAccessMissingException MessageErrorType = `WAREHOUSE_ACCESS_MISSING_EXCEPTION` const MessageErrorTypeWarehouseNotFoundException MessageErrorType = `WAREHOUSE_NOT_FOUND_EXCEPTION` @@ -1040,11 +1042,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `DESCRIBE_QUERY_INVALID_SQL_ERROR`, `DESCRIBE_QUERY_TIMEOUT`, `DESCRIBE_QUERY_UNEXPECTED_FAILURE`, `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, 
`GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION`, `INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION`, `INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION`, `INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION`, `INVALID_SQL_UNKNOWN_TABLE_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_ATTACHMENT_TOO_LONG_ERROR`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `MISSING_SQL_QUERY_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `DESCRIBE_QUERY_INVALID_SQL_ERROR`, `DESCRIBE_QUERY_TIMEOUT`, `DESCRIBE_QUERY_UNEXPECTED_FAILURE`, `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION`, `INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION`, `INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION`, `INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION`, `INVALID_SQL_UNKNOWN_TABLE_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_ATTACHMENT_TOO_LONG_ERROR`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `MISSING_SQL_QUERY_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, 
`RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "DESCRIBE_QUERY_INVALID_SQL_ERROR", "DESCRIBE_QUERY_TIMEOUT", "DESCRIBE_QUERY_UNEXPECTED_FAILURE", "EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION", "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION", "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION", "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION", "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_ATTACHMENT_TOO_LONG_ERROR", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "MISSING_SQL_QUERY_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "DESCRIBE_QUERY_INVALID_SQL_ERROR", "DESCRIBE_QUERY_TIMEOUT", "DESCRIBE_QUERY_UNEXPECTED_FAILURE", "EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", 
"FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION", "INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION", "INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION", "INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION", "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION", "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_ATTACHMENT_TOO_LONG_ERROR", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "MISSING_SQL_QUERY_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "UNSUPPORTED_CONVERSATION_TYPE_EXCEPTION", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } @@ -1111,6 +1113,7 @@ func (f *MessageErrorType) Values() []MessageErrorType { MessageErrorTypeTooManyTablesException, MessageErrorTypeUnexpectedReplyProcessException, MessageErrorTypeUnknownAiModel, + MessageErrorTypeUnsupportedConversationTypeException, MessageErrorTypeWarehouseAccessMissingException, MessageErrorTypeWarehouseNotFoundException, } diff --git a/service/jobs/model.go b/service/jobs/model.go index 3954a27d3..1d6c404ed 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -5226,7 +5226,7 @@ type TableUpdateTriggerConfiguration struct { MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` // A list of tables to monitor for changes. The table name must be in the // format `catalog_name.schema_name.table_name`. - TableNames []string `json:"table_names,omitempty"` + TableNames []string `json:"table_names"` // If set, the trigger starts a run only after no table updates have // occurred for the specified time and can be used to wait for a series of // table updates before triggering a run. 
The minimum allowed value is 60 diff --git a/service/marketplace/impl.go b/service/marketplace/impl.go index 459257581..87f640ad0 100755 --- a/service/marketplace/impl.go +++ b/service/marketplace/impl.go @@ -10,7 +10,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/useragent" - "github.com/google/uuid" ) // unexported type that holds implementations of just ConsumerFulfillments API methods @@ -953,9 +952,6 @@ func (a *providerPersonalizationRequestsImpl) internalList(ctx context.Context, func (a *providerPersonalizationRequestsImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { var updatePersonalizationRequestResponse UpdatePersonalizationRequestResponse - if request.RequestId == "" { - request.RequestId = uuid.New().String() - } path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v/personalization-requests/%v/request-status", request.ListingId, request.RequestId) queryParams := make(map[string]any) headers := make(map[string]string) diff --git a/service/ml/api.go b/service/ml/api.go index 2ce87a5c9..e90ad71a4 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -356,6 +356,9 @@ func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, exper type FeatureEngineeringInterface interface { + // Batch create materialized features. + BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) + // Create a Feature. CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) diff --git a/service/ml/impl.go b/service/ml/impl.go index 08bfd6aaa..ebeaaec53 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -592,6 +592,17 @@ type featureEngineeringImpl struct { client *client.DatabricksClient } +func (a *featureEngineeringImpl) BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) { + var batchCreateMaterializedFeaturesResponse BatchCreateMaterializedFeaturesResponse + path := "/api/2.0/feature-engineering/materialized-features:batchCreate" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &batchCreateMaterializedFeaturesResponse) + return &batchCreateMaterializedFeaturesResponse, err +} + func (a *featureEngineeringImpl) CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) { var feature Feature path := "/api/2.0/feature-engineering/features" diff --git a/service/ml/interface.go b/service/ml/interface.go index 7f2471d83..8d0f18413 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -257,6 +257,9 @@ type ExperimentsService interface { // Deprecated: Do not use this interface, it will be removed in a future version of the SDK. type FeatureEngineeringService interface { + // Batch create materialized features. + BatchCreateMaterializedFeatures(ctx context.Context, request BatchCreateMaterializedFeaturesRequest) (*BatchCreateMaterializedFeaturesResponse, error) + // Create a Feature. 
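
The new RPC above maps to a POST against the AIP-style custom-verb path `/api/2.0/feature-engineering/materialized-features:batchCreate`. A hedged call sketch, assuming the workspace client exposes the service as `w.FeatureEngineering` (matching the interface name); the shape of each `CreateMaterializedFeatureRequest` is not shown in this diff, so the batch is left empty purely to show the call surface:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/ml"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Each element would describe one feature to materialize; its fields
	// are not part of this diff, so none are populated here.
	resp, err := w.FeatureEngineering.BatchCreateMaterializedFeatures(ctx,
		ml.BatchCreateMaterializedFeaturesRequest{
			Requests: []ml.CreateMaterializedFeatureRequest{},
		})
	if err != nil {
		panic(err)
	}
	for _, mf := range resp.MaterializedFeatures {
		fmt.Printf("created: %+v\n", mf)
	}
}
```
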
CreateFeature(ctx context.Context, request CreateFeatureRequest) (*Feature, error) diff --git a/service/ml/model.go b/service/ml/model.go index 190633a41..95efd9e80 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -238,6 +238,16 @@ type ApproveTransitionRequestResponse struct { Activity *Activity `json:"activity,omitempty"` } +type BatchCreateMaterializedFeaturesRequest struct { + // The requests to create materialized features. + Requests []CreateMaterializedFeatureRequest `json:"requests"` +} + +type BatchCreateMaterializedFeaturesResponse struct { + // The created materialized features with assigned IDs. + MaterializedFeatures []MaterializedFeature `json:"materialized_features,omitempty"` +} + // An action that a user (with sufficient permissions) could take on an activity // or comment. // @@ -1160,6 +1170,8 @@ type Feature struct { Function Function `json:"function"` // The input columns from which the feature is computed. Inputs []string `json:"inputs"` + // Lineage context information for this feature. + LineageContext *LineageContext `json:"lineage_context,omitempty"` // The data source of the feature. Source DataSource `json:"source"` // The time window in which the feature is computed. @@ -1727,6 +1739,23 @@ type InputTag struct { Value string `json:"value"` } +type JobContext struct { + // The job ID where this API was invoked. + JobId int64 `json:"job_id,omitempty"` + // The job run ID where this API was invoked. + JobRunId int64 `json:"job_run_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *JobContext) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s JobContext) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type JobSpec struct { // The personal access token used to authorize webhook's job runs. AccessToken string `json:"access_token"` @@ -1767,6 +1796,26 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Lineage context information for tracking where an API was invoked. This will +// allow us to track lineage, which currently uses caller entity information for +// use across the Lineage Client and Observability in Lumberjack. +type LineageContext struct { + // Job context information including job ID and run ID. + JobContext *JobContext `json:"job_context,omitempty"` + // The notebook ID where this API was invoked. + NotebookId int64 `json:"notebook_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *LineageContext) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s LineageContext) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Feature for model version. ([ML-57150] Renamed from Feature to LinkedFeature) type LinkedFeature struct { // Feature name @@ -2411,7 +2460,7 @@ type MaterializedFeature struct { OfflineStoreConfig *OfflineStoreConfig `json:"offline_store_config,omitempty"` - OnlineStoreConfig *OnlineStore `json:"online_store_config,omitempty"` + OnlineStoreConfig *OnlineStoreConfig `json:"online_store_config,omitempty"` // The schedule state of the materialization pipeline. PipelineScheduleState MaterializedFeaturePipelineScheduleState `json:"pipeline_schedule_state,omitempty"` // The fully qualified Unity Catalog path to the table containing the @@ -2800,6 +2849,20 @@ func (s OnlineStore) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Configuration for online store destination. +type OnlineStoreConfig struct { + // The Unity Catalog catalog name.
This name is also used as the Lakebase + // logical database name. + CatalogName string `json:"catalog_name"` + // The name of the target online store. + OnlineStoreName string `json:"online_store_name"` + // The Unity Catalog schema name. + SchemaName string `json:"schema_name"` + // Prefix for Unity Catalog table name. The materialized feature will be + // stored in a Lakebase table with this prefix and a generated postfix. + TableNamePrefix string `json:"table_name_prefix"` +} + type OnlineStoreState string const OnlineStoreStateAvailable OnlineStoreState = `AVAILABLE` diff --git a/service/pipelines/api.go b/service/pipelines/api.go index d843e562a..d343e33bb 100755 --- a/service/pipelines/api.go +++ b/service/pipelines/api.go @@ -1,19 +1,20 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. +// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. // -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. +// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. package pipelines import ( @@ -84,12 +85,12 @@ type PipelinesInterface interface { // Retrieves events for a pipeline. ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. // // This method is generated by Databricks SDK Code Generator. ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. // // This method is generated by Databricks SDK Code Generator. ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) @@ -156,20 +157,21 @@ func NewPipelines(client *client.DatabricksClient) *PipelinesAPI { } } -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. 
+// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. // -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. +// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. type PipelinesAPI struct { pipelinesImpl } diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index 1147be7b8..15b8bf25e 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -118,7 +118,7 @@ func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request return &listPipelineEventsResponse, err } -// Lists pipelines defined in the Delta Live Tables system. +// Lists pipelines defined in the Spark Declarative Pipelines system. func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { @@ -143,7 +143,7 @@ func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelines return iterator } -// Lists pipelines defined in the Delta Live Tables system. +// Lists pipelines defined in the Spark Declarative Pipelines system. func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { iterator := a.ListPipelines(ctx, request) return listing.ToSlice[PipelineStateInfo](ctx, iterator) diff --git a/service/pipelines/interface.go b/service/pipelines/interface.go index 31816b10d..4118064cd 100755 --- a/service/pipelines/interface.go +++ b/service/pipelines/interface.go @@ -6,20 +6,21 @@ import ( "context" ) -// The Delta Live Tables API allows you to create, edit, delete, start, and view -// details about pipelines. +// The Lakeflow Spark Declarative Pipelines API allows you to create, edit, +// delete, start, and view details about pipelines. // -// Delta Live Tables is a framework for building reliable, maintainable, and -// testable data processing pipelines. You define the transformations to perform -// on your data, and Delta Live Tables manages task orchestration, cluster -// management, monitoring, data quality, and error handling. 
+// Spark Declarative Pipelines is a framework for building reliable, +// maintainable, and testable data processing pipelines. You define the +// transformations to perform on your data, and Spark Declarative Pipelines +// manages task orchestration, cluster management, monitoring, data quality, and +// error handling. // // Instead of defining your data pipelines using a series of separate Apache -// Spark tasks, Delta Live Tables manages how your data is transformed based on -// a target schema you define for each processing step. You can also enforce -// data quality with Delta Live Tables expectations. Expectations allow you to -// define expected data quality and specify how to handle records that fail -// those expectations. +// Spark tasks, Spark Declarative Pipelines manages how your data is transformed +// based on a target schema you define for each processing step. You can also +// enforce data quality with Spark Declarative Pipelines expectations. +// Expectations allow you to define expected data quality and specify how to +// handle records that fail those expectations. // // Deprecated: Do not use this interface, it will be removed in a future version of the SDK. type PipelinesService interface { @@ -49,7 +50,7 @@ type PipelinesService interface { // Retrieves events for a pipeline. ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) - // Lists pipelines defined in the Delta Live Tables system. + // Lists pipelines defined in the Spark Declarative Pipelines system. ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) // List updates for an active pipeline. diff --git a/service/pipelines/model.go b/service/pipelines/model.go index d3a5679d0..e9a1a2903 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -572,8 +572,8 @@ type IngestionGatewayPipelineDefinition struct { GatewayStorageCatalog string `json:"gateway_storage_catalog"` // Optional. The Unity Catalog-compatible name for the gateway storage // location. This is the destination to use for the data that is extracted - // by the gateway. Delta Live Tables system will automatically create the - // storage location under the catalog and schema. + // by the gateway. Spark Declarative Pipelines system will automatically + // create the storage location under the catalog and schema. GatewayStorageName string `json:"gateway_storage_name,omitempty"` // Required, Immutable. The name of the schema for the gateway pipelines's // storage location. @@ -2088,8 +2088,8 @@ type TableSpecificConfig struct { // The SCD type to use to ingest the table. ScdType TableSpecificConfigScdType `json:"scd_type,omitempty"` // The column names specifying the logical order of events in the source - // data. Delta Live Tables uses this sequencing to handle change events that - // arrive out of order. + // data. Spark Declarative Pipelines uses this sequencing to handle change + // events that arrive out of order. 
SequenceBy []string `json:"sequence_by,omitempty"` // (Optional) Additional custom parameters for Workday Report WorkdayReportParameters *IngestionPipelineDefinitionWorkdayReportParameters `json:"workday_report_parameters,omitempty"` diff --git a/service/pkg.go b/service/pkg.go index 96e82b61c..31b198181 100755 --- a/service/pkg.go +++ b/service/pkg.go @@ -206,7 +206,7 @@ // // - [settings.PersonalComputeAPI]: The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources. // -// - [pipelines.PipelinesAPI]: The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines. +// - [pipelines.PipelinesAPI]: The Lakeflow Spark Declarative Pipelines API allows you to create, edit, delete, start, and view details about pipelines. // // - [catalog.PoliciesAPI]: Attribute-Based Access Control (ABAC) provides high leverage governance for enforcing compliance policies in Unity Catalog. // diff --git a/service/vectorsearch/api.go b/service/vectorsearch/api.go index 505de8214..837c32589 100755 --- a/service/vectorsearch/api.go +++ b/service/vectorsearch/api.go @@ -53,6 +53,9 @@ type VectorSearchEndpointsInterface interface { // This method is generated by Databricks SDK Code Generator. ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) + // Retrieve user-visible metrics for an endpoint + RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) + // Update the budget policy of an endpoint UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index dfbc05884..78848be27 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -88,6 +88,17 @@ func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, r return &listEndpointResponse, err } +func (a *vectorSearchEndpointsImpl) RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) { + var retrieveUserVisibleMetricsResponse RetrieveUserVisibleMetricsResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/metrics", request.Name) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &retrieveUserVisibleMetricsResponse) + return &retrieveUserVisibleMetricsResponse, err +} + func (a *vectorSearchEndpointsImpl) UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) { var patchEndpointBudgetPolicyResponse PatchEndpointBudgetPolicyResponse path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/budget-policy", request.EndpointName) diff --git a/service/vectorsearch/interface.go b/service/vectorsearch/interface.go index a5d4569da..3074b650c 100755 --- a/service/vectorsearch/interface.go +++ b/service/vectorsearch/interface.go @@ -23,6 +23,9 @@ type VectorSearchEndpointsService interface { // List all vector search endpoints in the workspace. 
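
`RetrieveUserVisibleMetrics` (added above; request and response shapes appear in `vectorsearch/model.go` further down) is a POST to `/api/2.0/vector-search/endpoints/{name}/metrics` and pages via `next_page_token` rather than the SDK's iterator machinery. A sketch of a manual pagination loop, assuming a configured workspace client; "my-endpoint" is a placeholder, and which of `Metrics`, `StartTime`, and `EndTime` the service requires is not specified in this diff, so they are omitted here:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	req := vectorsearch.RetrieveUserVisibleMetricsRequest{
		Name:                 "my-endpoint", // placeholder endpoint name
		GranularityInSeconds: 60,
	}
	// NextPageToken drives pagination; loop until it comes back empty.
	for {
		resp, err := w.VectorSearchEndpoints.RetrieveUserVisibleMetrics(ctx, req)
		if err != nil {
			panic(err)
		}
		for _, mv := range resp.MetricValues {
			fmt.Printf("%+v\n", mv)
		}
		if resp.NextPageToken == "" {
			break
		}
		req.PageToken = resp.NextPageToken
	}
}
```
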
ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) + // Retrieve user-visible metrics for an endpoint + RetrieveUserVisibleMetrics(ctx context.Context, request RetrieveUserVisibleMetricsRequest) (*RetrieveUserVisibleMetricsResponse, error) + // Update the budget policy of an endpoint UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 2e8076edd..c193e2f77 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -348,6 +348,10 @@ const EndpointStatusStateOnline EndpointStatusState = `ONLINE` const EndpointStatusStateProvisioning EndpointStatusState = `PROVISIONING` +const EndpointStatusStateRedState EndpointStatusState = `RED_STATE` + +const EndpointStatusStateYellowState EndpointStatusState = `YELLOW_STATE` + // String representation for [fmt.Print] func (f *EndpointStatusState) String() string { return string(*f) @@ -356,11 +360,11 @@ func (f *EndpointStatusState) String() string { // Set raw string value and validate it against allowed values func (f *EndpointStatusState) Set(v string) error { switch v { - case `OFFLINE`, `ONLINE`, `PROVISIONING`: + case `OFFLINE`, `ONLINE`, `PROVISIONING`, `RED_STATE`, `YELLOW_STATE`: *f = EndpointStatusState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING"`, v) + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING", "RED_STATE", "YELLOW_STATE"`, v) } } @@ -372,6 +376,8 @@ func (f *EndpointStatusState) Values() []EndpointStatusState { EndpointStatusStateOffline, EndpointStatusStateOnline, EndpointStatusStateProvisioning, + EndpointStatusStateRedState, + EndpointStatusStateYellowState, } } @@ -532,6 +538,70 @@ func (s MapStringValueEntry) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Metric specification +type Metric struct { + // Metric labels + Labels []MetricLabel `json:"labels,omitempty"` + // Metric name + Name string `json:"name,omitempty"` + // Percentile for the metric + Percentile float64 `json:"percentile,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Metric) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Metric) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Label for a metric +type MetricLabel struct { + // Label name + Name string `json:"name,omitempty"` + // Label value + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *MetricLabel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetricLabel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Single metric value at a specific timestamp +type MetricValue struct { + // Timestamp of the metric value (milliseconds since epoch) + Timestamp int64 `json:"timestamp,omitempty"` + // Metric value + Value float64 `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *MetricValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s MetricValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Collection of metric values for a specific metric +type MetricValues struct { + // Metric specification + Metric *Metric `json:"metric,omitempty"` + // Time series of metric values + Values []MetricValue 
`json:"values,omitempty"` +} + type MiniVectorIndex struct { // The user who created the index. Creator string `json:"creator,omitempty"` @@ -668,7 +738,8 @@ type QueryVectorIndexRequest struct { NumResults int `json:"num_results,omitempty"` // Query text. Required for Delta Sync Index using model endpoint. QueryText string `json:"query_text,omitempty"` - // The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. + // The query type to use. Choices are `ANN` and `HYBRID` and `FULL_TEXT`. + // Defaults to `ANN`. QueryType string `json:"query_type,omitempty"` // Query vector. Required for Direct Vector Access Index and Delta Sync // Index using self-managed vectors. @@ -767,6 +838,51 @@ func (s ResultManifest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Request to retrieve user-visible metrics +type RetrieveUserVisibleMetricsRequest struct { + // End time for metrics query + EndTime string `json:"end_time,omitempty"` + // Granularity in seconds + GranularityInSeconds int `json:"granularity_in_seconds,omitempty"` + // List of metrics to retrieve + Metrics []Metric `json:"metrics,omitempty"` + // Vector search endpoint name + Name string `json:"-" url:"-"` + // Token for pagination + PageToken string `json:"page_token,omitempty"` + // Start time for metrics query + StartTime string `json:"start_time,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RetrieveUserVisibleMetricsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RetrieveUserVisibleMetricsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Response containing user-visible metrics +type RetrieveUserVisibleMetricsResponse struct { + // Collection of metric values + MetricValues []MetricValues `json:"metric_values,omitempty"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken string `json:"next_page_token,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RetrieveUserVisibleMetricsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RetrieveUserVisibleMetricsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ScanVectorIndexRequest struct { // Name of the vector index to scan. IndexName string `json:"-" url:"-"` diff --git a/workspace_client.go b/workspace_client.go index cf1c2a83f..3f4c7e850 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -638,20 +638,21 @@ type WorkspaceClient struct { // [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html Permissions iam.PermissionsInterface - // The Delta Live Tables API allows you to create, edit, delete, start, and - // view details about pipelines. + // The Lakeflow Spark Declarative Pipelines API allows you to create, edit, + // delete, start, and view details about pipelines. // - // Delta Live Tables is a framework for building reliable, maintainable, and - // testable data processing pipelines. You define the transformations to - // perform on your data, and Delta Live Tables manages task orchestration, - // cluster management, monitoring, data quality, and error handling. + // Spark Declarative Pipelines is a framework for building reliable, + // maintainable, and testable data processing pipelines. 
You define the + // transformations to perform on your data, and Spark Declarative Pipelines + // manages task orchestration, cluster management, monitoring, data quality, + // and error handling. // // Instead of defining your data pipelines using a series of separate Apache - // Spark tasks, Delta Live Tables manages how your data is transformed based - // on a target schema you define for each processing step. You can also - // enforce data quality with Delta Live Tables expectations. Expectations - // allow you to define expected data quality and specify how to handle - // records that fail those expectations. + // Spark tasks, Spark Declarative Pipelines manages how your data is + // transformed based on a target schema you define for each processing step. + // You can also enforce data quality with Spark Declarative Pipelines + // expectations. Expectations allow you to define expected data quality and + // specify how to handle records that fail those expectations. Pipelines pipelines.PipelinesInterface // Attribute-Based Access Control (ABAC) provides high leverage governance
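
Finally, the regenerated mocks at the top of this diff give both new methods the usual mockery expecter surface. A test sketch against the vector search mock, assuming the generated file also contains the conventional `NewMockVectorSearchEndpointsInterface` constructor (not shown in the hunks above):

```go
package example_test

import (
	"context"
	"testing"

	mockvs "github.com/databricks/databricks-sdk-go/experimental/mocks/service/vectorsearch"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestRetrieveUserVisibleMetrics(t *testing.T) {
	// The standard mockery constructor registers cleanup-time
	// expectation checks on t.
	m := mockvs.NewMockVectorSearchEndpointsInterface(t)
	m.EXPECT().
		RetrieveUserVisibleMetrics(mock.Anything, mock.Anything).
		Return(&vectorsearch.RetrieveUserVisibleMetricsResponse{
			MetricValues: []vectorsearch.MetricValues{},
		}, nil)

	resp, err := m.RetrieveUserVisibleMetrics(context.Background(),
		vectorsearch.RetrieveUserVisibleMetricsRequest{Name: "my-endpoint"})
	require.NoError(t, err)
	require.NotNil(t, resp)
}
```
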